/* $NetBSD: if_wm.c,v 1.202 2010/03/07 07:53:37 msaitoh Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.202 2010/03/07 07:53:37 msaitoh Exp $");

#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define WM_DEBUG_LINK	0x01
#define WM_DEBUG_TX	0x02
#define WM_DEBUG_RX	0x04
#define WM_DEBUG_GMII	0x08
int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
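
/*
 * Example usage (the doubled parentheses pass a complete printf()
 * argument list as the single macro argument "y"):
 *
 *	DPRINTF(WM_DEBUG_TX,
 *	    ("%s: TDT -> %d\n", device_xname(sc->sc_dev), x));
 *
 * The call compiles away entirely when WM_DEBUG is not defined.
 */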

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define WM_NTXSEGS		256
#define WM_IFQUEUELEN		256
#define WM_TXQUEUELEN_MAX	64
#define WM_TXQUEUELEN_MAX_82547	16
#define WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define WM_NTXDESC_82542	256
#define WM_NTXDESC_82544	4096
#define WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define WM_NRXDESC		256
#define WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

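/*
 * Illustrative sketch (not part of the driver): WM_NTXDESC(sc) and
 * WM_NRXDESC are powers of two, so the "(x + 1) & (size - 1)" form
 * above is equivalent to "(x + 1) % size" without the division.  For
 * example, with WM_NRXDESC == 256, WM_NEXTRX(255) == 256 & 255 == 0,
 * wrapping back to the start of the ring.
 */
#if 0
static inline int
ring_next_example(int x, int size)	/* size must be a power of two */
{

	return (x + 1) & (size - 1);
}
#endif
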
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
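
/*
 * Example: after WM_RXCHAIN_RESET(sc), sc_rxtailp points at sc_rxhead.
 * Linking m1 and then m2 leaves sc_rxhead -> m1 -> m2 with sc_rxtail
 * == m2 and sc_rxtailp == &m2->m_next, so each link is O(1) and the
 * chain never has to be walked.
 */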

#ifdef WM_EVENT_COUNTERS
#define WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define WM_EVCNT_INCR(ev)	/* nothing */
#define WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define CSR_WRITE_FLUSH(sc) \
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDTXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define WM_CDRXADDR_HI(sc, x) \
	(sizeof(bus_addr_t) == 8 ? \
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
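
/*
 * Note: when bus_addr_t is 32 bits wide, the "_HI" macros above are a
 * compile-time constant zero, so a single set of definitions serves
 * both 32-bit and 64-bit DMA configurations without any #ifdef.
 */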

#define WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC(sc)) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC(sc) - __x), (ops)); \
		__n -= (WM_NTXDESC(sc) - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
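
/*
 * Example: on a 4096-descriptor ring, WM_CDTXSYNC(sc, 4094, 4, ops)
 * issues two bus_dmamap_sync() calls: one covering descriptors
 * 4094-4095 and one covering descriptors 0-1 after the wrap.
 */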

#define WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU.  On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	wm_set_dma_addr(&__rxd->wrx_addr, \
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
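
/*
 * Why 2 bytes: the Ethernet header is 14 bytes long, so starting the
 * frame 2 bytes into the buffer places the IP header that follows it
 * at offset 16, a 4-byte boundary.  Illustrative fragment (not
 * compiled; "m" stands for the receive mbuf):
 */
#if 0
	char *buf = m->m_ext.ext_buf;		/* 2 KB cluster */
	/* buf + 2 + 14 == buf + 16: the IP header lands 4-byte aligned. */
	struct ip *iph = (struct ip *)(buf + 2 + ETHER_HDR_LEN);
#endif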

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	wm_chip_type wmp_type;
	int wmp_flags;
#define WMP_F_1000X	0x01
#define WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571, WMP_F_1000T, },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576, WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580, WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580, WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER, WMP_F_1000T },
	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, sc->sc_rev);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
			    NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
	    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
	    sc->sc_cd_rseg, sc->sc_cd_size,
	    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
	    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrbits(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrbits(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_82575:
	case WM_T_82576:
	case WM_T_82580:
	case WM_T_82580ER:
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrbits(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
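		/*
		 * The GFPREG fields are flash sector numbers: the low
		 * word holds the first sector of the NVM region and
		 * bits 31:16 its last sector.  Multiplying the sector
		 * count by ICH_FLASH_SECTOR_SIZE gives bytes, and the
		 * final division by 2 * sizeof(uint16_t) converts bytes
		 * to 16-bit words per bank (the region holds two banks).
		 */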
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the
	 * checksum.  This allows the EEPROM type to be printed correctly
	 * in the case that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read it a second time, because some PCI-e parts fail
		 * the first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-address");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
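	/*
	 * Example: the EEPROM stores the station address as three
	 * little-endian 16-bit words, so myea[] = { 0x0100, 0x0302,
	 * 0x0504 } unpacks to the Ethernet address 00:01:02:03:04:05.
	 */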

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575)) {
		if (sc->sc_funcid == 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	if (sc->sc_type == WM_T_PCH) {
		uint16_t val;

		/* Save the NVM K1 bit setting */
		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);

		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
			sc->sc_nvm_k1_enabled = 1;
		else
			sc->sc_nvm_k1_enabled = 0;
	}

1718 /*
1719 	 * Determine whether we're in TBI, GMII or SGMII mode, and
1720 	 * initialize the media structures accordingly.
1721 */
1722 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1723 || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1724 || sc->sc_type == WM_T_82573
1725 || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1726 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1727 wm_gmii_mediainit(sc, wmp->wmp_product);
1728 } else if (sc->sc_type < WM_T_82543 ||
1729 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1730 if (wmp->wmp_flags & WMP_F_1000T)
1731 aprint_error_dev(sc->sc_dev,
1732 "WARNING: TBIMODE set on 1000BASE-T product!\n");
1733 wm_tbi_mediainit(sc);
1734 } else {
1735 switch (sc->sc_type) {
1736 case WM_T_82575:
1737 case WM_T_82576:
1738 case WM_T_82580:
1739 case WM_T_82580ER:
1740 reg = CSR_READ(sc, WMREG_CTRL_EXT);
1741 switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1742 case CTRL_EXT_LINK_MODE_SGMII:
1743 aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1744 sc->sc_flags |= WM_F_SGMII;
1745 CSR_WRITE(sc, WMREG_CTRL_EXT,
1746 reg | CTRL_EXT_I2C_ENA);
1747 wm_gmii_mediainit(sc, wmp->wmp_product);
1748 break;
1749 case CTRL_EXT_LINK_MODE_1000KX:
1750 case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1751 aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1752 CSR_WRITE(sc, WMREG_CTRL_EXT,
1753 reg | CTRL_EXT_I2C_ENA);
1754 panic("not supported yet\n");
1755 break;
1756 case CTRL_EXT_LINK_MODE_GMII:
1757 default:
1758 CSR_WRITE(sc, WMREG_CTRL_EXT,
1759 reg & ~CTRL_EXT_I2C_ENA);
1760 wm_gmii_mediainit(sc, wmp->wmp_product);
1761 break;
1762 }
1763 break;
1764 default:
1765 if (wmp->wmp_flags & WMP_F_1000X)
1766 aprint_error_dev(sc->sc_dev,
1767 "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1768 wm_gmii_mediainit(sc, wmp->wmp_product);
1769 }
1770 }
1771
1772 ifp = &sc->sc_ethercom.ec_if;
1773 xname = device_xname(sc->sc_dev);
1774 strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1775 ifp->if_softc = sc;
1776 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1777 ifp->if_ioctl = wm_ioctl;
1778 ifp->if_start = wm_start;
1779 ifp->if_watchdog = wm_watchdog;
1780 ifp->if_init = wm_init;
1781 ifp->if_stop = wm_stop;
1782 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1783 IFQ_SET_READY(&ifp->if_snd);
1784
1785 /* Check for jumbo frame */
1786 switch (sc->sc_type) {
1787 case WM_T_82573:
1788 /* XXX limited to 9234 if ASPM is disabled */
1789 wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1790 if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1791 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1792 break;
1793 case WM_T_82571:
1794 case WM_T_82572:
1795 case WM_T_82574:
1796 case WM_T_82575:
1797 case WM_T_82576:
1798 case WM_T_82580:
1799 case WM_T_82580ER:
1800 case WM_T_80003:
1801 case WM_T_ICH9:
1802 case WM_T_ICH10:
1803 /* XXX limited to 9234 */
1804 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1805 break;
1806 case WM_T_PCH:
1807 /* XXX limited to 4096 */
1808 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1809 break;
1810 case WM_T_82542_2_0:
1811 case WM_T_82542_2_1:
1812 case WM_T_82583:
1813 case WM_T_ICH8:
1814 /* No support for jumbo frame */
1815 break;
1816 default:
1817 /* ETHER_MAX_LEN_JUMBO */
1818 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1819 break;
1820 }
1821
1822 /*
1823 	 * If we're an i82543 or greater, we can support VLANs.
1824 */
1825 if (sc->sc_type >= WM_T_82543)
1826 sc->sc_ethercom.ec_capabilities |=
1827 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1828
1829 /*
1830 	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
1831 * on i82543 and later.
1832 */
1833 if (sc->sc_type >= WM_T_82543) {
1834 ifp->if_capabilities |=
1835 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1836 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1837 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1838 IFCAP_CSUM_TCPv6_Tx |
1839 IFCAP_CSUM_UDPv6_Tx;
1840 }
1841
1842 /*
1843 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1844 *
1845 * 82541GI (8086:1076) ... no
1846 * 82572EI (8086:10b9) ... yes
1847 */
1848 if (sc->sc_type >= WM_T_82571) {
1849 ifp->if_capabilities |=
1850 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1851 }
1852
1853 /*
1854 	 * If we're an i82544 or greater (except i82547), we can do
1855 * TCP segmentation offload.
1856 */
1857 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1858 ifp->if_capabilities |= IFCAP_TSOv4;
1859 }
1860
1861 if (sc->sc_type >= WM_T_82571) {
1862 ifp->if_capabilities |= IFCAP_TSOv6;
1863 }
1864
1865 /*
1866 * Attach the interface.
1867 */
1868 if_attach(ifp);
1869 ether_ifattach(ifp, enaddr);
1870 #if NRND > 0
1871 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1872 #endif
1873
1874 #ifdef WM_EVENT_COUNTERS
1875 /* Attach event counters. */
1876 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1877 NULL, xname, "txsstall");
1878 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1879 NULL, xname, "txdstall");
1880 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1881 NULL, xname, "txfifo_stall");
1882 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1883 NULL, xname, "txdw");
1884 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1885 NULL, xname, "txqe");
1886 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1887 NULL, xname, "rxintr");
1888 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1889 NULL, xname, "linkintr");
1890
1891 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1892 NULL, xname, "rxipsum");
1893 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1894 NULL, xname, "rxtusum");
1895 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1896 NULL, xname, "txipsum");
1897 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1898 NULL, xname, "txtusum");
1899 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1900 NULL, xname, "txtusum6");
1901
1902 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1903 NULL, xname, "txtso");
1904 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1905 NULL, xname, "txtso6");
1906 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1907 NULL, xname, "txtsopain");
1908
1909 for (i = 0; i < WM_NTXSEGS; i++) {
1910 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1911 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1912 NULL, xname, wm_txseg_evcnt_names[i]);
1913 }
1914
1915 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1916 NULL, xname, "txdrop");
1917
1918 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1919 NULL, xname, "tu");
1920
1921 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1922 NULL, xname, "tx_xoff");
1923 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1924 NULL, xname, "tx_xon");
1925 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1926 NULL, xname, "rx_xoff");
1927 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1928 NULL, xname, "rx_xon");
1929 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1930 NULL, xname, "rx_macctl");
1931 #endif /* WM_EVENT_COUNTERS */
1932
1933 if (pmf_device_register(self, NULL, NULL))
1934 pmf_class_network_register(self, ifp);
1935 else
1936 aprint_error_dev(self, "couldn't establish power handler\n");
1937
1938 return;
1939
1940 /*
1941 * Free any resources we've allocated during the failed attach
1942 * attempt. Do this in reverse order and fall through.
1943 */
1944 fail_5:
1945 for (i = 0; i < WM_NRXDESC; i++) {
1946 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1947 bus_dmamap_destroy(sc->sc_dmat,
1948 sc->sc_rxsoft[i].rxs_dmamap);
1949 }
1950 fail_4:
1951 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1952 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1953 bus_dmamap_destroy(sc->sc_dmat,
1954 sc->sc_txsoft[i].txs_dmamap);
1955 }
1956 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1957 fail_3:
1958 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1959 fail_2:
1960 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1961 sc->sc_cd_size);
1962 fail_1:
1963 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
1964 fail_0:
1965 return;
1966 }
1967
1968 static int
1969 wm_detach(device_t self, int flags __unused)
1970 {
1971 struct wm_softc *sc = device_private(self);
1972 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1973 int i, s;
1974
1975 s = splnet();
1976 /* Stop the interface. Callouts are stopped in it. */
1977 wm_stop(ifp, 1);
1978 splx(s);
1979
1980 pmf_device_deregister(self);
1981
1982 /* Tell the firmware about the release */
1983 #if 0
1984 wm_release_manageability(sc);
1985 #endif
1986
1987 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
1988
1989 /* Delete all remaining media. */
1990 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
1991
1992 ether_ifdetach(ifp);
1993 if_detach(ifp);
1994
1995
1996 /* Unload RX dmamaps and free mbufs */
1997 wm_rxdrain(sc);
1998
1999 /* Free dmamap. It's the same as the end of the wm_attach() function */
2000 for (i = 0; i < WM_NRXDESC; i++) {
2001 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2002 bus_dmamap_destroy(sc->sc_dmat,
2003 sc->sc_rxsoft[i].rxs_dmamap);
2004 }
2005 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2006 if (sc->sc_txsoft[i].txs_dmamap != NULL)
2007 bus_dmamap_destroy(sc->sc_dmat,
2008 sc->sc_txsoft[i].txs_dmamap);
2009 }
2010 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2011 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2012 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2013 sc->sc_cd_size);
2014 bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2015
2016 /* Disestablish the interrupt handler */
2017 if (sc->sc_ih != NULL) {
2018 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2019 sc->sc_ih = NULL;
2020 }
2021
2022 /* Unmap the register */
2023 if (sc->sc_ss) {
2024 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2025 sc->sc_ss = 0;
2026 }
2027
2028 #if 0
2029 wm_release_hw_control(sc);
2030 #endif
2031
2032 return 0;
2033 }
2034
2035 /*
2036 * wm_tx_offload:
2037 *
2038 * Set up TCP/IP checksumming parameters for the
2039 * specified packet.
2040 */
2041 static int
2042 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2043 uint8_t *fieldsp)
2044 {
2045 struct mbuf *m0 = txs->txs_mbuf;
2046 struct livengood_tcpip_ctxdesc *t;
2047 uint32_t ipcs, tucs, cmd, cmdlen, seg;
2048 uint32_t ipcse;
2049 struct ether_header *eh;
2050 int offset, iphl;
2051 uint8_t fields;
2052
2053 /*
2054 * XXX It would be nice if the mbuf pkthdr had offset
2055 * fields for the protocol headers.
2056 */
2057
2058 eh = mtod(m0, struct ether_header *);
2059 switch (htons(eh->ether_type)) {
2060 case ETHERTYPE_IP:
2061 case ETHERTYPE_IPV6:
2062 offset = ETHER_HDR_LEN;
2063 break;
2064
2065 case ETHERTYPE_VLAN:
2066 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2067 break;
2068
2069 default:
2070 /*
2071 * Don't support this protocol or encapsulation.
2072 */
2073 *fieldsp = 0;
2074 *cmdp = 0;
2075 return 0;
2076 }
2077
2078 if ((m0->m_pkthdr.csum_flags &
2079 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2080 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2081 } else {
2082 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2083 }
2084 ipcse = offset + iphl - 1;
2085
2086 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2087 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2088 seg = 0;
2089 fields = 0;
2090
2091 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2092 int hlen = offset + iphl;
2093 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2094
2095 if (__predict_false(m0->m_len <
2096 (hlen + sizeof(struct tcphdr)))) {
2097 /*
2098 * TCP/IP headers are not in the first mbuf; we need
2099 * to do this the slow and painful way. Let's just
2100 * hope this doesn't happen very often.
2101 */
2102 struct tcphdr th;
2103
2104 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2105
2106 m_copydata(m0, hlen, sizeof(th), &th);
2107 if (v4) {
2108 struct ip ip;
2109
2110 m_copydata(m0, offset, sizeof(ip), &ip);
2111 ip.ip_len = 0;
2112 m_copyback(m0,
2113 offset + offsetof(struct ip, ip_len),
2114 sizeof(ip.ip_len), &ip.ip_len);
2115 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2116 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2117 } else {
2118 struct ip6_hdr ip6;
2119
2120 m_copydata(m0, offset, sizeof(ip6), &ip6);
2121 ip6.ip6_plen = 0;
2122 m_copyback(m0,
2123 offset + offsetof(struct ip6_hdr, ip6_plen),
2124 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2125 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2126 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2127 }
2128 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2129 sizeof(th.th_sum), &th.th_sum);
2130
2131 hlen += th.th_off << 2;
2132 } else {
2133 /*
2134 * TCP/IP headers are in the first mbuf; we can do
2135 * this the easy way.
2136 */
2137 struct tcphdr *th;
2138
2139 if (v4) {
2140 struct ip *ip =
2141 (void *)(mtod(m0, char *) + offset);
2142 th = (void *)(mtod(m0, char *) + hlen);
2143
2144 ip->ip_len = 0;
2145 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2146 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2147 } else {
2148 struct ip6_hdr *ip6 =
2149 (void *)(mtod(m0, char *) + offset);
2150 th = (void *)(mtod(m0, char *) + hlen);
2151
2152 ip6->ip6_plen = 0;
2153 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2154 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2155 }
2156 hlen += th->th_off << 2;
2157 }
2158
2159 if (v4) {
2160 WM_EVCNT_INCR(&sc->sc_ev_txtso);
2161 cmdlen |= WTX_TCPIP_CMD_IP;
2162 } else {
2163 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2164 ipcse = 0;
2165 }
2166 cmd |= WTX_TCPIP_CMD_TSE;
2167 cmdlen |= WTX_TCPIP_CMD_TSE |
2168 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2169 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2170 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2171 }
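
	/*
	 * A sketch of the values computed above for a hypothetical TSO
	 * send: with Ethernet + IPv4 + TCP headers of hlen = 14 + 20 +
	 * 20 = 54 bytes, an MSS of 1460 and a 9054-byte packet, cmdlen
	 * carries the payload length 9054 - 54 = 9000 and seg is
	 * WTX_TCPIP_SEG_HDRLEN(54) | WTX_TCPIP_SEG_MSS(1460), from which
	 * the chip cuts ceil(9000 / 1460) = 7 frames.
	 */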
2172
2173 /*
2174 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2175 * offload feature, if we load the context descriptor, we
2176 * MUST provide valid values for IPCSS and TUCSS fields.
2177 */
2178
2179 ipcs = WTX_TCPIP_IPCSS(offset) |
2180 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2181 WTX_TCPIP_IPCSE(ipcse);
2182 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2183 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2184 fields |= WTX_IXSM;
2185 }
2186
2187 offset += iphl;
2188
2189 if (m0->m_pkthdr.csum_flags &
2190 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2191 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2192 fields |= WTX_TXSM;
2193 tucs = WTX_TCPIP_TUCSS(offset) |
2194 WTX_TCPIP_TUCSO(offset +
2195 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2196 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2197 } else if ((m0->m_pkthdr.csum_flags &
2198 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2199 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2200 fields |= WTX_TXSM;
2201 tucs = WTX_TCPIP_TUCSS(offset) |
2202 WTX_TCPIP_TUCSO(offset +
2203 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2204 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2205 } else {
2206 /* Just initialize it to a valid TCP context. */
2207 tucs = WTX_TCPIP_TUCSS(offset) |
2208 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2209 WTX_TCPIP_TUCSE(0) /* rest of packet */;
2210 }
2211
2212 /* Fill in the context descriptor. */
2213 t = (struct livengood_tcpip_ctxdesc *)
2214 &sc->sc_txdescs[sc->sc_txnext];
2215 t->tcpip_ipcs = htole32(ipcs);
2216 t->tcpip_tucs = htole32(tucs);
2217 t->tcpip_cmdlen = htole32(cmdlen);
2218 t->tcpip_seg = htole32(seg);
2219 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2220
2221 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2222 txs->txs_ndesc++;
2223
2224 *cmdp = cmd;
2225 *fieldsp = fields;
2226
2227 return 0;
2228 }
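
/*
 * A worked example of the context fields set up in wm_tx_offload()
 * (a sketch for an untagged IPv4/TCP frame with standard headers):
 * offset = ETHER_HDR_LEN = 14 and iphl = 20 give IPCSS = 14,
 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 and IPCSE = 33 (the
 * last byte of the IP header); then TUCSS = 34 and
 * TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50, with TUCSE = 0
 * meaning "checksum to the end of the packet".
 */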
2229
2230 static void
2231 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2232 {
2233 struct mbuf *m;
2234 int i;
2235
2236 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2237 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2238 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2239 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2240 m->m_data, m->m_len, m->m_flags);
2241 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2242 i, i == 1 ? "" : "s");
2243 }
2244
2245 /*
2246 * wm_82547_txfifo_stall:
2247 *
2248 * Callout used to wait for the 82547 Tx FIFO to drain,
2249 * reset the FIFO pointers, and restart packet transmission.
2250 */
2251 static void
2252 wm_82547_txfifo_stall(void *arg)
2253 {
2254 struct wm_softc *sc = arg;
2255 int s;
2256
2257 s = splnet();
2258
2259 if (sc->sc_txfifo_stall) {
2260 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2261 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2262 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2263 /*
2264 * Packets have drained. Stop transmitter, reset
2265 * FIFO pointers, restart transmitter, and kick
2266 * the packet queue.
2267 */
2268 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2269 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2270 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2271 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2272 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2273 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2274 CSR_WRITE(sc, WMREG_TCTL, tctl);
2275 CSR_WRITE_FLUSH(sc);
2276
2277 sc->sc_txfifo_head = 0;
2278 sc->sc_txfifo_stall = 0;
2279 wm_start(&sc->sc_ethercom.ec_if);
2280 } else {
2281 /*
2282 * Still waiting for packets to drain; try again in
2283 * another tick.
2284 */
2285 callout_schedule(&sc->sc_txfifo_ch, 1);
2286 }
2287 }
2288
2289 splx(s);
2290 }
2291
2292 /*
2293 * wm_82547_txfifo_bugchk:
2294 *
2295 * Check for bug condition in the 82547 Tx FIFO. We need to
2296 * prevent enqueueing a packet that would wrap around the end
2297  * of the Tx FIFO ring buffer, otherwise the chip will croak.
2298 *
2299 * We do this by checking the amount of space before the end
2300 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2301 * the Tx FIFO, wait for all remaining packets to drain, reset
2302 * the internal FIFO pointers to the beginning, and restart
2303 * transmission on the interface.
2304 */
2305 #define WM_FIFO_HDR 0x10
2306 #define WM_82547_PAD_LEN 0x3e0
2307 static int
2308 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2309 {
2310 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2311 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2312
2313 /* Just return if already stalled. */
2314 if (sc->sc_txfifo_stall)
2315 return 1;
2316
2317 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2318 /* Stall only occurs in half-duplex mode. */
2319 goto send_packet;
2320 }
2321
2322 if (len >= WM_82547_PAD_LEN + space) {
2323 sc->sc_txfifo_stall = 1;
2324 callout_schedule(&sc->sc_txfifo_ch, 1);
2325 return 1;
2326 }
2327
2328 send_packet:
2329 sc->sc_txfifo_head += len;
2330 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2331 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2332
2333 return 0;
2334 }
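
/*
 * A sketch of the arithmetic in wm_82547_txfifo_bugchk(): a 1514-byte
 * frame is padded to len = roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) =
 * 1536 bytes, so the stall path is taken whenever 1536 -
 * WM_82547_PAD_LEN = 544 bytes or less remain before the end of the
 * Tx FIFO.
 */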
2335
2336 /*
2337 * wm_start: [ifnet interface function]
2338 *
2339 * Start packet transmission on the interface.
2340 */
2341 static void
2342 wm_start(struct ifnet *ifp)
2343 {
2344 struct wm_softc *sc = ifp->if_softc;
2345 struct mbuf *m0;
2346 struct m_tag *mtag;
2347 struct wm_txsoft *txs;
2348 bus_dmamap_t dmamap;
2349 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2350 bus_addr_t curaddr;
2351 bus_size_t seglen, curlen;
2352 uint32_t cksumcmd;
2353 uint8_t cksumfields;
2354
2355 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2356 return;
2357
2358 /*
2359 * Remember the previous number of free descriptors.
2360 */
2361 ofree = sc->sc_txfree;
2362
2363 /*
2364 * Loop through the send queue, setting up transmit descriptors
2365 * until we drain the queue, or use up all available transmit
2366 * descriptors.
2367 */
2368 for (;;) {
2369 /* Grab a packet off the queue. */
2370 IFQ_POLL(&ifp->if_snd, m0);
2371 if (m0 == NULL)
2372 break;
2373
2374 DPRINTF(WM_DEBUG_TX,
2375 ("%s: TX: have packet to transmit: %p\n",
2376 device_xname(sc->sc_dev), m0));
2377
2378 /* Get a work queue entry. */
2379 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2380 wm_txintr(sc);
2381 if (sc->sc_txsfree == 0) {
2382 DPRINTF(WM_DEBUG_TX,
2383 ("%s: TX: no free job descriptors\n",
2384 device_xname(sc->sc_dev)));
2385 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2386 break;
2387 }
2388 }
2389
2390 txs = &sc->sc_txsoft[sc->sc_txsnext];
2391 dmamap = txs->txs_dmamap;
2392
2393 use_tso = (m0->m_pkthdr.csum_flags &
2394 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2395
2396 /*
2397 * So says the Linux driver:
2398 * The controller does a simple calculation to make sure
2399 * there is enough room in the FIFO before initiating the
2400 		 * DMA for each buffer. The calculation assumes that
2401 		 * ceil(buffer len / MSS) <= 4, so to make sure we don't
2402 		 * overrun the FIFO we cap each buffer at 4 * MSS
2403 		 * (adjusting the max buffer len if the MSS drops).
2404 */
2405 dmamap->dm_maxsegsz =
2406 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2407 ? m0->m_pkthdr.segsz << 2
2408 : WTX_MAX_LEN;
2409
2410 /*
2411 * Load the DMA map. If this fails, the packet either
2412 * didn't fit in the allotted number of segments, or we
2413 * were short on resources. For the too-many-segments
2414 * case, we simply report an error and drop the packet,
2415 * since we can't sanely copy a jumbo packet to a single
2416 * buffer.
2417 */
2418 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2419 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2420 if (error) {
2421 if (error == EFBIG) {
2422 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2423 log(LOG_ERR, "%s: Tx packet consumes too many "
2424 "DMA segments, dropping...\n",
2425 device_xname(sc->sc_dev));
2426 IFQ_DEQUEUE(&ifp->if_snd, m0);
2427 wm_dump_mbuf_chain(sc, m0);
2428 m_freem(m0);
2429 continue;
2430 }
2431 /*
2432 * Short on resources, just stop for now.
2433 */
2434 DPRINTF(WM_DEBUG_TX,
2435 ("%s: TX: dmamap load failed: %d\n",
2436 device_xname(sc->sc_dev), error));
2437 break;
2438 }
2439
2440 segs_needed = dmamap->dm_nsegs;
2441 if (use_tso) {
2442 /* For sentinel descriptor; see below. */
2443 segs_needed++;
2444 }
2445
2446 /*
2447 * Ensure we have enough descriptors free to describe
2448 * the packet. Note, we always reserve one descriptor
2449 * at the end of the ring due to the semantics of the
2450 * TDT register, plus one more in the event we need
2451 * to load offload context.
2452 */
2453 if (segs_needed > sc->sc_txfree - 2) {
2454 /*
2455 * Not enough free descriptors to transmit this
2456 * packet. We haven't committed anything yet,
2457 * so just unload the DMA map, put the packet
2458 			 * back on the queue, and punt. Notify the upper
2459 * layer that there are no more slots left.
2460 */
2461 DPRINTF(WM_DEBUG_TX,
2462 ("%s: TX: need %d (%d) descriptors, have %d\n",
2463 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2464 segs_needed, sc->sc_txfree - 1));
2465 ifp->if_flags |= IFF_OACTIVE;
2466 bus_dmamap_unload(sc->sc_dmat, dmamap);
2467 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2468 break;
2469 }
2470
2471 /*
2472 * Check for 82547 Tx FIFO bug. We need to do this
2473 * once we know we can transmit the packet, since we
2474 * do some internal FIFO space accounting here.
2475 */
2476 if (sc->sc_type == WM_T_82547 &&
2477 wm_82547_txfifo_bugchk(sc, m0)) {
2478 DPRINTF(WM_DEBUG_TX,
2479 ("%s: TX: 82547 Tx FIFO bug detected\n",
2480 device_xname(sc->sc_dev)));
2481 ifp->if_flags |= IFF_OACTIVE;
2482 bus_dmamap_unload(sc->sc_dmat, dmamap);
2483 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2484 break;
2485 }
2486
2487 IFQ_DEQUEUE(&ifp->if_snd, m0);
2488
2489 /*
2490 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2491 */
2492
2493 DPRINTF(WM_DEBUG_TX,
2494 ("%s: TX: packet has %d (%d) DMA segments\n",
2495 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2496
2497 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2498
2499 /*
2500 * Store a pointer to the packet so that we can free it
2501 * later.
2502 *
2503 		 * Initially, we take the number of descriptors the
2504 		 * packet uses to be the number of DMA segments. This may be
2505 * incremented by 1 if we do checksum offload (a descriptor
2506 * is used to set the checksum context).
2507 */
2508 txs->txs_mbuf = m0;
2509 txs->txs_firstdesc = sc->sc_txnext;
2510 txs->txs_ndesc = segs_needed;
2511
2512 /* Set up offload parameters for this packet. */
2513 if (m0->m_pkthdr.csum_flags &
2514 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2515 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2516 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2517 if (wm_tx_offload(sc, txs, &cksumcmd,
2518 &cksumfields) != 0) {
2519 /* Error message already displayed. */
2520 bus_dmamap_unload(sc->sc_dmat, dmamap);
2521 continue;
2522 }
2523 } else {
2524 cksumcmd = 0;
2525 cksumfields = 0;
2526 }
2527
2528 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2529
2530 /* Sync the DMA map. */
2531 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2532 BUS_DMASYNC_PREWRITE);
2533
2534 /*
2535 * Initialize the transmit descriptor.
2536 */
2537 for (nexttx = sc->sc_txnext, seg = 0;
2538 seg < dmamap->dm_nsegs; seg++) {
2539 for (seglen = dmamap->dm_segs[seg].ds_len,
2540 curaddr = dmamap->dm_segs[seg].ds_addr;
2541 seglen != 0;
2542 curaddr += curlen, seglen -= curlen,
2543 nexttx = WM_NEXTTX(sc, nexttx)) {
2544 curlen = seglen;
2545
2546 /*
2547 * So says the Linux driver:
2548 * Work around for premature descriptor
2549 * write-backs in TSO mode. Append a
2550 * 4-byte sentinel descriptor.
2551 */
2552 if (use_tso &&
2553 seg == dmamap->dm_nsegs - 1 &&
2554 curlen > 8)
2555 curlen -= 4;
2556
2557 wm_set_dma_addr(
2558 &sc->sc_txdescs[nexttx].wtx_addr,
2559 curaddr);
2560 sc->sc_txdescs[nexttx].wtx_cmdlen =
2561 htole32(cksumcmd | curlen);
2562 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2563 0;
2564 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2565 cksumfields;
2566 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2567 lasttx = nexttx;
2568
2569 DPRINTF(WM_DEBUG_TX,
2570 ("%s: TX: desc %d: low 0x%08lx, "
2571 "len 0x%04x\n",
2572 device_xname(sc->sc_dev), nexttx,
2573 curaddr & 0xffffffffUL, (unsigned)curlen));
2574 }
2575 }
2576
2577 KASSERT(lasttx != -1);
2578
2579 /*
2580 * Set up the command byte on the last descriptor of
2581 * the packet. If we're in the interrupt delay window,
2582 * delay the interrupt.
2583 */
2584 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2585 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2586
2587 /*
2588 * If VLANs are enabled and the packet has a VLAN tag, set
2589 * up the descriptor to encapsulate the packet for us.
2590 *
2591 * This is only valid on the last descriptor of the packet.
2592 */
2593 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2594 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2595 htole32(WTX_CMD_VLE);
2596 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2597 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2598 }
2599
2600 txs->txs_lastdesc = lasttx;
2601
2602 DPRINTF(WM_DEBUG_TX,
2603 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2604 device_xname(sc->sc_dev),
2605 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2606
2607 /* Sync the descriptors we're using. */
2608 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2609 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2610
2611 /* Give the packet to the chip. */
2612 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2613
2614 DPRINTF(WM_DEBUG_TX,
2615 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2616
2617 DPRINTF(WM_DEBUG_TX,
2618 ("%s: TX: finished transmitting packet, job %d\n",
2619 device_xname(sc->sc_dev), sc->sc_txsnext));
2620
2621 /* Advance the tx pointer. */
2622 sc->sc_txfree -= txs->txs_ndesc;
2623 sc->sc_txnext = nexttx;
2624
2625 sc->sc_txsfree--;
2626 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2627
2628 /* Pass the packet to any BPF listeners. */
2629 if (ifp->if_bpf)
2630 bpf_ops->bpf_mtap(ifp->if_bpf, m0);
2631 }
2632
2633 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2634 /* No more slots; notify upper layer. */
2635 ifp->if_flags |= IFF_OACTIVE;
2636 }
2637
2638 if (sc->sc_txfree != ofree) {
2639 /* Set a watchdog timer in case the chip flakes out. */
2640 ifp->if_timer = 5;
2641 }
2642 }
2643
2644 /*
2645 * wm_watchdog: [ifnet interface function]
2646 *
2647 * Watchdog timer handler.
2648 */
2649 static void
2650 wm_watchdog(struct ifnet *ifp)
2651 {
2652 struct wm_softc *sc = ifp->if_softc;
2653
2654 /*
2655 * Since we're using delayed interrupts, sweep up
2656 * before we report an error.
2657 */
2658 wm_txintr(sc);
2659
2660 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2661 log(LOG_ERR,
2662 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2663 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2664 sc->sc_txnext);
2665 ifp->if_oerrors++;
2666
2667 /* Reset the interface. */
2668 (void) wm_init(ifp);
2669 }
2670
2671 /* Try to get more packets going. */
2672 wm_start(ifp);
2673 }
2674
2675 /*
2676 * wm_ioctl: [ifnet interface function]
2677 *
2678 * Handle control requests from the operator.
2679 */
2680 static int
2681 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2682 {
2683 struct wm_softc *sc = ifp->if_softc;
2684 struct ifreq *ifr = (struct ifreq *) data;
2685 struct ifaddr *ifa = (struct ifaddr *)data;
2686 struct sockaddr_dl *sdl;
2687 int diff, s, error;
2688
2689 s = splnet();
2690
2691 switch (cmd) {
2692 case SIOCSIFFLAGS:
2693 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2694 break;
2695 if (ifp->if_flags & IFF_UP) {
2696 diff = (ifp->if_flags ^ sc->sc_if_flags)
2697 & (IFF_PROMISC | IFF_ALLMULTI);
2698 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2699 /*
2700 				 * If the difference between the last and
2701 				 * new flags is only IFF_PROMISC or
2702 				 * IFF_ALLMULTI, just set the multicast filter
2703 				 * (don't reset the chip, to avoid link down).
2704 */
2705 wm_set_filter(sc);
2706 } else {
2707 /*
2708 * Reset the interface to pick up changes in
2709 * any other flags that affect the hardware
2710 * state.
2711 */
2712 wm_init(ifp);
2713 }
2714 } else {
2715 if (ifp->if_flags & IFF_RUNNING)
2716 wm_stop(ifp, 1);
2717 }
2718 sc->sc_if_flags = ifp->if_flags;
2719 error = 0;
2720 break;
2721 case SIOCSIFMEDIA:
2722 case SIOCGIFMEDIA:
2723 /* Flow control requires full-duplex mode. */
2724 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2725 (ifr->ifr_media & IFM_FDX) == 0)
2726 ifr->ifr_media &= ~IFM_ETH_FMASK;
2727 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2728 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2729 /* We can do both TXPAUSE and RXPAUSE. */
2730 ifr->ifr_media |=
2731 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2732 }
2733 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2734 }
2735 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2736 break;
2737 case SIOCINITIFADDR:
2738 if (ifa->ifa_addr->sa_family == AF_LINK) {
2739 sdl = satosdl(ifp->if_dl->ifa_addr);
2740 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2741 LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2742 /* unicast address is first multicast entry */
2743 wm_set_filter(sc);
2744 error = 0;
2745 break;
2746 }
2747 /* Fall through for rest */
2748 default:
2749 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2750 break;
2751
2752 error = 0;
2753
2754 if (cmd == SIOCSIFCAP)
2755 error = (*ifp->if_init)(ifp);
2756 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2757 ;
2758 else if (ifp->if_flags & IFF_RUNNING) {
2759 /*
2760 * Multicast list has changed; set the hardware filter
2761 * accordingly.
2762 */
2763 wm_set_filter(sc);
2764 }
2765 break;
2766 }
2767
2768 /* Try to get more packets going. */
2769 wm_start(ifp);
2770
2771 splx(s);
2772 return error;
2773 }
2774
2775 /*
2776 * wm_intr:
2777 *
2778 * Interrupt service routine.
2779 */
2780 static int
2781 wm_intr(void *arg)
2782 {
2783 struct wm_softc *sc = arg;
2784 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2785 uint32_t icr;
2786 int handled = 0;
2787
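	/*
	 * On this family of MACs, reading ICR acknowledges (clears) the
	 * asserted interrupt causes, so the loop below keeps servicing
	 * Tx, Rx and link events until no enabled cause remains set.
	 */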
2788 while (1 /* CONSTCOND */) {
2789 icr = CSR_READ(sc, WMREG_ICR);
2790 if ((icr & sc->sc_icr) == 0)
2791 break;
2792 #if 0 /*NRND > 0*/
2793 if (RND_ENABLED(&sc->rnd_source))
2794 rnd_add_uint32(&sc->rnd_source, icr);
2795 #endif
2796
2797 handled = 1;
2798
2799 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2800 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2801 DPRINTF(WM_DEBUG_RX,
2802 ("%s: RX: got Rx intr 0x%08x\n",
2803 device_xname(sc->sc_dev),
2804 icr & (ICR_RXDMT0|ICR_RXT0)));
2805 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2806 }
2807 #endif
2808 wm_rxintr(sc);
2809
2810 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2811 if (icr & ICR_TXDW) {
2812 DPRINTF(WM_DEBUG_TX,
2813 ("%s: TX: got TXDW interrupt\n",
2814 device_xname(sc->sc_dev)));
2815 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2816 }
2817 #endif
2818 wm_txintr(sc);
2819
2820 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2821 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2822 wm_linkintr(sc, icr);
2823 }
2824
2825 if (icr & ICR_RXO) {
2826 #if defined(WM_DEBUG)
2827 log(LOG_WARNING, "%s: Receive overrun\n",
2828 device_xname(sc->sc_dev));
2829 #endif /* defined(WM_DEBUG) */
2830 }
2831 }
2832
2833 if (handled) {
2834 /* Try to get more packets going. */
2835 wm_start(ifp);
2836 }
2837
2838 return handled;
2839 }
2840
2841 /*
2842 * wm_txintr:
2843 *
2844 * Helper; handle transmit interrupts.
2845 */
2846 static void
2847 wm_txintr(struct wm_softc *sc)
2848 {
2849 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2850 struct wm_txsoft *txs;
2851 uint8_t status;
2852 int i;
2853
2854 ifp->if_flags &= ~IFF_OACTIVE;
2855
2856 /*
2857 * Go through the Tx list and free mbufs for those
2858 * frames which have been transmitted.
2859 */
2860 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2861 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2862 txs = &sc->sc_txsoft[i];
2863
2864 DPRINTF(WM_DEBUG_TX,
2865 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2866
2867 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2868 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2869
2870 status =
2871 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2872 if ((status & WTX_ST_DD) == 0) {
2873 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2874 BUS_DMASYNC_PREREAD);
2875 break;
2876 }
2877
2878 DPRINTF(WM_DEBUG_TX,
2879 ("%s: TX: job %d done: descs %d..%d\n",
2880 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2881 txs->txs_lastdesc));
2882
2883 /*
2884 * XXX We should probably be using the statistics
2885 * XXX registers, but I don't know if they exist
2886 * XXX on chips before the i82544.
2887 */
2888
2889 #ifdef WM_EVENT_COUNTERS
2890 if (status & WTX_ST_TU)
2891 WM_EVCNT_INCR(&sc->sc_ev_tu);
2892 #endif /* WM_EVENT_COUNTERS */
2893
2894 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2895 ifp->if_oerrors++;
2896 if (status & WTX_ST_LC)
2897 log(LOG_WARNING, "%s: late collision\n",
2898 device_xname(sc->sc_dev));
2899 else if (status & WTX_ST_EC) {
2900 ifp->if_collisions += 16;
2901 log(LOG_WARNING, "%s: excessive collisions\n",
2902 device_xname(sc->sc_dev));
2903 }
2904 } else
2905 ifp->if_opackets++;
2906
2907 sc->sc_txfree += txs->txs_ndesc;
2908 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2909 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2910 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2911 m_freem(txs->txs_mbuf);
2912 txs->txs_mbuf = NULL;
2913 }
2914
2915 /* Update the dirty transmit buffer pointer. */
2916 sc->sc_txsdirty = i;
2917 DPRINTF(WM_DEBUG_TX,
2918 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2919
2920 /*
2921 * If there are no more pending transmissions, cancel the watchdog
2922 * timer.
2923 */
2924 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2925 ifp->if_timer = 0;
2926 }
2927
2928 /*
2929 * wm_rxintr:
2930 *
2931 * Helper; handle receive interrupts.
2932 */
2933 static void
2934 wm_rxintr(struct wm_softc *sc)
2935 {
2936 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2937 struct wm_rxsoft *rxs;
2938 struct mbuf *m;
2939 int i, len;
2940 uint8_t status, errors;
2941 uint16_t vlantag;
2942
2943 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2944 rxs = &sc->sc_rxsoft[i];
2945
2946 DPRINTF(WM_DEBUG_RX,
2947 ("%s: RX: checking descriptor %d\n",
2948 device_xname(sc->sc_dev), i));
2949
2950 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2951
2952 status = sc->sc_rxdescs[i].wrx_status;
2953 errors = sc->sc_rxdescs[i].wrx_errors;
2954 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2955 vlantag = sc->sc_rxdescs[i].wrx_special;
2956
2957 if ((status & WRX_ST_DD) == 0) {
2958 /*
2959 * We have processed all of the receive descriptors.
2960 */
2961 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2962 break;
2963 }
2964
2965 if (__predict_false(sc->sc_rxdiscard)) {
2966 DPRINTF(WM_DEBUG_RX,
2967 ("%s: RX: discarding contents of descriptor %d\n",
2968 device_xname(sc->sc_dev), i));
2969 WM_INIT_RXDESC(sc, i);
2970 if (status & WRX_ST_EOP) {
2971 /* Reset our state. */
2972 DPRINTF(WM_DEBUG_RX,
2973 ("%s: RX: resetting rxdiscard -> 0\n",
2974 device_xname(sc->sc_dev)));
2975 sc->sc_rxdiscard = 0;
2976 }
2977 continue;
2978 }
2979
2980 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2981 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2982
2983 m = rxs->rxs_mbuf;
2984
2985 /*
2986 * Add a new receive buffer to the ring, unless of
2987 * course the length is zero. Treat the latter as a
2988 * failed mapping.
2989 */
2990 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2991 /*
2992 * Failed, throw away what we've done so
2993 * far, and discard the rest of the packet.
2994 */
2995 ifp->if_ierrors++;
2996 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2997 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2998 WM_INIT_RXDESC(sc, i);
2999 if ((status & WRX_ST_EOP) == 0)
3000 sc->sc_rxdiscard = 1;
3001 if (sc->sc_rxhead != NULL)
3002 m_freem(sc->sc_rxhead);
3003 WM_RXCHAIN_RESET(sc);
3004 DPRINTF(WM_DEBUG_RX,
3005 ("%s: RX: Rx buffer allocation failed, "
3006 "dropping packet%s\n", device_xname(sc->sc_dev),
3007 sc->sc_rxdiscard ? " (discard)" : ""));
3008 continue;
3009 }
3010
3011 m->m_len = len;
3012 sc->sc_rxlen += len;
3013 DPRINTF(WM_DEBUG_RX,
3014 ("%s: RX: buffer at %p len %d\n",
3015 device_xname(sc->sc_dev), m->m_data, len));
3016
3017 /*
3018 * If this is not the end of the packet, keep
3019 * looking.
3020 */
3021 if ((status & WRX_ST_EOP) == 0) {
3022 WM_RXCHAIN_LINK(sc, m);
3023 DPRINTF(WM_DEBUG_RX,
3024 ("%s: RX: not yet EOP, rxlen -> %d\n",
3025 device_xname(sc->sc_dev), sc->sc_rxlen));
3026 continue;
3027 }
3028
3029 /*
3030 * Okay, we have the entire packet now. The chip is
3031 * configured to include the FCS (not all chips can
3032 * be configured to strip it), so we need to trim it.
3033 * May need to adjust length of previous mbuf in the
3034 * chain if the current mbuf is too short.
3035 */
3036 if (m->m_len < ETHER_CRC_LEN) {
3037 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
3038 m->m_len = 0;
3039 } else {
3040 m->m_len -= ETHER_CRC_LEN;
3041 }
3042 len = sc->sc_rxlen - ETHER_CRC_LEN;
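
		/*
		 * Example of the trim above: if the final mbuf of a chain
		 * holds only 1 byte, that byte plus 3 bytes of the previous
		 * mbuf are FCS, so the previous mbuf is shortened by
		 * ETHER_CRC_LEN - 1 = 3 bytes and this one is emptied.
		 */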
3043
3044 WM_RXCHAIN_LINK(sc, m);
3045
3046 *sc->sc_rxtailp = NULL;
3047 m = sc->sc_rxhead;
3048
3049 WM_RXCHAIN_RESET(sc);
3050
3051 DPRINTF(WM_DEBUG_RX,
3052 ("%s: RX: have entire packet, len -> %d\n",
3053 device_xname(sc->sc_dev), len));
3054
3055 /*
3056 * If an error occurred, update stats and drop the packet.
3057 */
3058 if (errors &
3059 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3060 if (errors & WRX_ER_SE)
3061 log(LOG_WARNING, "%s: symbol error\n",
3062 device_xname(sc->sc_dev));
3063 else if (errors & WRX_ER_SEQ)
3064 log(LOG_WARNING, "%s: receive sequence error\n",
3065 device_xname(sc->sc_dev));
3066 else if (errors & WRX_ER_CE)
3067 log(LOG_WARNING, "%s: CRC error\n",
3068 device_xname(sc->sc_dev));
3069 m_freem(m);
3070 continue;
3071 }
3072
3073 /*
3074 * No errors. Receive the packet.
3075 */
3076 m->m_pkthdr.rcvif = ifp;
3077 m->m_pkthdr.len = len;
3078
3079 /*
3080 * If VLANs are enabled, VLAN packets have been unwrapped
3081 * for us. Associate the tag with the packet.
3082 */
3083 if ((status & WRX_ST_VP) != 0) {
3084 VLAN_INPUT_TAG(ifp, m,
3085 le16toh(vlantag),
3086 continue);
3087 }
3088
3089 /*
3090 * Set up checksum info for this packet.
3091 */
3092 if ((status & WRX_ST_IXSM) == 0) {
3093 if (status & WRX_ST_IPCS) {
3094 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3095 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3096 if (errors & WRX_ER_IPE)
3097 m->m_pkthdr.csum_flags |=
3098 M_CSUM_IPv4_BAD;
3099 }
3100 if (status & WRX_ST_TCPCS) {
3101 /*
3102 * Note: we don't know if this was TCP or UDP,
3103 * so we just set both bits, and expect the
3104 * upper layers to deal.
3105 */
3106 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3107 m->m_pkthdr.csum_flags |=
3108 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3109 M_CSUM_TCPv6 | M_CSUM_UDPv6;
3110 if (errors & WRX_ER_TCPE)
3111 m->m_pkthdr.csum_flags |=
3112 M_CSUM_TCP_UDP_BAD;
3113 }
3114 }
3115
3116 ifp->if_ipackets++;
3117
3118 /* Pass this up to any BPF listeners. */
3119 if (ifp->if_bpf)
3120 bpf_ops->bpf_mtap(ifp->if_bpf, m);
3121
3122 /* Pass it on. */
3123 (*ifp->if_input)(ifp, m);
3124 }
3125
3126 /* Update the receive pointer. */
3127 sc->sc_rxptr = i;
3128
3129 DPRINTF(WM_DEBUG_RX,
3130 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3131 }
3132
3133 /*
3134 * wm_linkintr_gmii:
3135 *
3136 * Helper; handle link interrupts for GMII.
3137 */
3138 static void
3139 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3140 {
3141
3142 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3143 __func__));
3144
3145 if (icr & ICR_LSC) {
3146 DPRINTF(WM_DEBUG_LINK,
3147 ("%s: LINK: LSC -> mii_tick\n",
3148 device_xname(sc->sc_dev)));
3149 mii_tick(&sc->sc_mii);
3150 if (sc->sc_type == WM_T_82543) {
3151 int miistatus, active;
3152
3153 			 * With the 82543, we need to force the MAC's
3154 			 * speed and duplex settings to match the PHY's
3155 			 * current speed and duplex configuration.
3156 * speed and duplex configuration is.
3157 */
3158 miistatus = sc->sc_mii.mii_media_status;
3159
3160 if (miistatus & IFM_ACTIVE) {
3161 active = sc->sc_mii.mii_media_active;
3162 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3163 switch (IFM_SUBTYPE(active)) {
3164 case IFM_10_T:
3165 sc->sc_ctrl |= CTRL_SPEED_10;
3166 break;
3167 case IFM_100_TX:
3168 sc->sc_ctrl |= CTRL_SPEED_100;
3169 break;
3170 case IFM_1000_T:
3171 sc->sc_ctrl |= CTRL_SPEED_1000;
3172 break;
3173 default:
3174 /*
3175 					 * Fiber?
3176 					 * Should not enter here.
3177 */
3178 printf("unknown media (%x)\n",
3179 active);
3180 break;
3181 }
3182 if (active & IFM_FDX)
3183 sc->sc_ctrl |= CTRL_FD;
3184 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3185 }
3186 } else if ((sc->sc_type == WM_T_ICH8)
3187 && (sc->sc_phytype == WMPHY_IGP_3)) {
3188 wm_kmrn_lock_loss_workaround_ich8lan(sc);
3189 } else if (sc->sc_type == WM_T_PCH) {
3190 wm_k1_gig_workaround_hv(sc,
3191 ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3192 }
3193
3194 if ((sc->sc_phytype == WMPHY_82578)
3195 && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3196 == IFM_1000_T)) {
3197
3198 if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3199 delay(200*1000); /* XXX too big */
3200
3201 /* Link stall fix for link up */
3202 wm_gmii_hv_writereg(sc->sc_dev, 1,
3203 HV_MUX_DATA_CTRL,
3204 HV_MUX_DATA_CTRL_GEN_TO_MAC
3205 | HV_MUX_DATA_CTRL_FORCE_SPEED);
3206 wm_gmii_hv_writereg(sc->sc_dev, 1,
3207 HV_MUX_DATA_CTRL,
3208 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3209 }
3210 }
3211 } else if (icr & ICR_RXSEQ) {
3212 DPRINTF(WM_DEBUG_LINK,
3213 ("%s: LINK Receive sequence error\n",
3214 device_xname(sc->sc_dev)));
3215 }
3216 }
3217
3218 /*
3219 * wm_linkintr_tbi:
3220 *
3221 * Helper; handle link interrupts for TBI mode.
3222 */
3223 static void
3224 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3225 {
3226 uint32_t status;
3227
3228 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3229 __func__));
3230
3231 status = CSR_READ(sc, WMREG_STATUS);
3232 if (icr & ICR_LSC) {
3233 if (status & STATUS_LU) {
3234 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3235 device_xname(sc->sc_dev),
3236 (status & STATUS_FD) ? "FDX" : "HDX"));
3237 /*
3238 * NOTE: CTRL will update TFCE and RFCE automatically,
3239 * so we should update sc->sc_ctrl
3240 */
3241
3242 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3243 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3244 sc->sc_fcrtl &= ~FCRTL_XONE;
3245 if (status & STATUS_FD)
3246 sc->sc_tctl |=
3247 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3248 else
3249 sc->sc_tctl |=
3250 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3251 if (sc->sc_ctrl & CTRL_TFCE)
3252 sc->sc_fcrtl |= FCRTL_XONE;
3253 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3254 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3255 WMREG_OLD_FCRTL : WMREG_FCRTL,
3256 sc->sc_fcrtl);
3257 sc->sc_tbi_linkup = 1;
3258 } else {
3259 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3260 device_xname(sc->sc_dev)));
3261 sc->sc_tbi_linkup = 0;
3262 }
3263 wm_tbi_set_linkled(sc);
3264 } else if (icr & ICR_RXCFG) {
3265 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3266 device_xname(sc->sc_dev)));
3267 sc->sc_tbi_nrxcfg++;
3268 wm_check_for_link(sc);
3269 } else if (icr & ICR_RXSEQ) {
3270 DPRINTF(WM_DEBUG_LINK,
3271 ("%s: LINK: Receive sequence error\n",
3272 device_xname(sc->sc_dev)));
3273 }
3274 }
3275
3276 /*
3277 * wm_linkintr:
3278 *
3279 * Helper; handle link interrupts.
3280 */
3281 static void
3282 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3283 {
3284
3285 if (sc->sc_flags & WM_F_HAS_MII)
3286 wm_linkintr_gmii(sc, icr);
3287 else
3288 wm_linkintr_tbi(sc, icr);
3289 }
3290
3291 /*
3292 * wm_tick:
3293 *
3294 * One second timer, used to check link status, sweep up
3295 * completed transmit jobs, etc.
3296 */
3297 static void
3298 wm_tick(void *arg)
3299 {
3300 struct wm_softc *sc = arg;
3301 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3302 int s;
3303
3304 s = splnet();
3305
3306 if (sc->sc_type >= WM_T_82542_2_1) {
3307 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3308 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3309 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3310 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3311 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3312 }
3313
3314 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3315 ifp->if_ierrors += 0ULL + /* ensure quad_t */
3316 + CSR_READ(sc, WMREG_CRCERRS)
3317 + CSR_READ(sc, WMREG_ALGNERRC)
3318 + CSR_READ(sc, WMREG_SYMERRC)
3319 + CSR_READ(sc, WMREG_RXERRC)
3320 + CSR_READ(sc, WMREG_SEC)
3321 + CSR_READ(sc, WMREG_CEXTERR)
3322 + CSR_READ(sc, WMREG_RLEC);
3323 ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3324
3325 if (sc->sc_flags & WM_F_HAS_MII)
3326 mii_tick(&sc->sc_mii);
3327 else
3328 wm_tbi_check_link(sc);
3329
3330 splx(s);
3331
3332 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3333 }
3334
3335 /*
3336 * wm_reset:
3337 *
3338 * Reset the i82542 chip.
3339 */
3340 static void
3341 wm_reset(struct wm_softc *sc)
3342 {
3343 int phy_reset = 0;
3344 uint32_t reg, mask;
3345 int i;
3346
3347 /*
3348 * Allocate on-chip memory according to the MTU size.
3349 * The Packet Buffer Allocation register must be written
3350 * before the chip is reset.
3351 */
3352 switch (sc->sc_type) {
3353 case WM_T_82547:
3354 case WM_T_82547_2:
3355 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3356 PBA_22K : PBA_30K;
3357 sc->sc_txfifo_head = 0;
3358 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3359 sc->sc_txfifo_size =
3360 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3361 sc->sc_txfifo_stall = 0;
3362 break;
3363 case WM_T_82571:
3364 case WM_T_82572:
3365 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3366 case WM_T_80003:
3367 sc->sc_pba = PBA_32K;
3368 break;
3369 case WM_T_82580:
3370 case WM_T_82580ER:
3371 sc->sc_pba = PBA_35K;
3372 break;
3373 case WM_T_82576:
3374 sc->sc_pba = PBA_64K;
3375 break;
3376 case WM_T_82573:
3377 sc->sc_pba = PBA_12K;
3378 break;
3379 case WM_T_82574:
3380 case WM_T_82583:
3381 sc->sc_pba = PBA_20K;
3382 break;
3383 case WM_T_ICH8:
3384 sc->sc_pba = PBA_8K;
3385 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3386 break;
3387 case WM_T_ICH9:
3388 case WM_T_ICH10:
3389 case WM_T_PCH:
3390 sc->sc_pba = PBA_10K;
3391 break;
3392 default:
3393 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3394 PBA_40K : PBA_48K;
3395 break;
3396 }
3397 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
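
	/*
	 * Example of the split above for the 82547 (assuming the PBA_*
	 * constants are in kilobytes, as their names suggest): the chip's
	 * 40 KB packet buffer is divided into 30 KB of Rx space and a
	 * 10 KB Tx FIFO at standard MTU, or 22 KB Rx / 18 KB Tx when
	 * jumbo frames are in use.
	 */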
3398
3399 /* Prevent the PCI-E bus from sticking */
3400 if (sc->sc_flags & WM_F_PCIE) {
3401 int timeout = 800;
3402
3403 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3404 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3405
3406 while (timeout--) {
3407 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3408 break;
3409 delay(100);
3410 }
3411 }
3412
3413 	/* Set the completion timeout for the interface */
3414 if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
3415 wm_set_pcie_completion_timeout(sc);
3416
3417 /* Clear interrupt */
3418 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3419
3420 /* Stop the transmit and receive processes. */
3421 CSR_WRITE(sc, WMREG_RCTL, 0);
3422 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3423 sc->sc_rctl &= ~RCTL_EN;
3424
3425 /* XXX set_tbi_sbp_82543() */
3426
3427 delay(10*1000);
3428
3429 /* Must acquire the MDIO ownership before MAC reset */
3430 switch (sc->sc_type) {
3431 case WM_T_82573:
3432 case WM_T_82574:
3433 case WM_T_82583:
3434 i = 0;
3435 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3436 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3437 do {
3438 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3439 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3440 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3441 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3442 break;
3443 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3444 delay(2*1000);
3445 i++;
3446 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3447 break;
3448 default:
3449 break;
3450 }
3451
3452 /*
3453 * 82541 Errata 29? & 82547 Errata 28?
3454 	 * See also the description of the PHY_RST bit in the CTRL register
3455 * in 8254x_GBe_SDM.pdf.
3456 */
3457 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3458 CSR_WRITE(sc, WMREG_CTRL,
3459 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3460 delay(5000);
3461 }
3462
3463 switch (sc->sc_type) {
3464 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3465 case WM_T_82541:
3466 case WM_T_82541_2:
3467 case WM_T_82547:
3468 case WM_T_82547_2:
3469 /*
3470 * On some chipsets, a reset through a memory-mapped write
3471 * cycle can cause the chip to reset before completing the
3472 		 * write cycle. This causes a major headache that can be
3473 * avoided by issuing the reset via indirect register writes
3474 * through I/O space.
3475 *
3476 * So, if we successfully mapped the I/O BAR at attach time,
3477 * use that. Otherwise, try our luck with a memory-mapped
3478 * reset.
3479 */
3480 if (sc->sc_flags & WM_F_IOH_VALID)
3481 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3482 else
3483 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3484 break;
3485 case WM_T_82545_3:
3486 case WM_T_82546_3:
3487 /* Use the shadow control register on these chips. */
3488 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3489 break;
3490 case WM_T_80003:
3491 mask = swfwphysem[sc->sc_funcid];
3492 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3493 wm_get_swfw_semaphore(sc, mask);
3494 CSR_WRITE(sc, WMREG_CTRL, reg);
3495 wm_put_swfw_semaphore(sc, mask);
3496 break;
3497 case WM_T_ICH8:
3498 case WM_T_ICH9:
3499 case WM_T_ICH10:
3500 case WM_T_PCH:
3501 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3502 if (wm_check_reset_block(sc) == 0) {
3503 if (sc->sc_type >= WM_T_PCH) {
3504 uint32_t status;
3505
3506 status = CSR_READ(sc, WMREG_STATUS);
3507 CSR_WRITE(sc, WMREG_STATUS,
3508 status & ~STATUS_PHYRA);
3509 }
3510
3511 reg |= CTRL_PHY_RESET;
3512 phy_reset = 1;
3513 }
3514 wm_get_swfwhw_semaphore(sc);
3515 CSR_WRITE(sc, WMREG_CTRL, reg);
3516 delay(20*1000);
3517 wm_put_swfwhw_semaphore(sc);
3518 break;
3519 case WM_T_82542_2_0:
3520 case WM_T_82542_2_1:
3521 case WM_T_82543:
3522 case WM_T_82540:
3523 case WM_T_82545:
3524 case WM_T_82546:
3525 case WM_T_82571:
3526 case WM_T_82572:
3527 case WM_T_82573:
3528 case WM_T_82574:
3529 case WM_T_82575:
3530 case WM_T_82576:
3531 case WM_T_82583:
3532 default:
3533 /* Everything else can safely use the documented method. */
3534 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3535 break;
3536 }
3537
3538 if (phy_reset != 0)
3539 wm_get_cfg_done(sc);
3540
3541 /* reload EEPROM */
3542 switch (sc->sc_type) {
3543 case WM_T_82542_2_0:
3544 case WM_T_82542_2_1:
3545 case WM_T_82543:
3546 case WM_T_82544:
3547 delay(10);
3548 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3549 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3550 delay(2000);
3551 break;
3552 case WM_T_82540:
3553 case WM_T_82545:
3554 case WM_T_82545_3:
3555 case WM_T_82546:
3556 case WM_T_82546_3:
3557 delay(5*1000);
3558 /* XXX Disable HW ARPs on ASF enabled adapters */
3559 break;
3560 case WM_T_82541:
3561 case WM_T_82541_2:
3562 case WM_T_82547:
3563 case WM_T_82547_2:
3564 delay(20000);
3565 /* XXX Disable HW ARPs on ASF enabled adapters */
3566 break;
3567 case WM_T_82571:
3568 case WM_T_82572:
3569 case WM_T_82573:
3570 case WM_T_82574:
3571 case WM_T_82583:
3572 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3573 delay(10);
3574 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3575 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3576 }
3577 /* check EECD_EE_AUTORD */
3578 wm_get_auto_rd_done(sc);
3579 /*
3580 		 * PHY configuration from the NVM starts only after EECD_AUTO_RD
3581 * is set.
3582 */
3583 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3584 || (sc->sc_type == WM_T_82583))
3585 delay(25*1000);
3586 break;
3587 case WM_T_82575:
3588 case WM_T_82576:
3589 case WM_T_80003:
3590 case WM_T_ICH8:
3591 case WM_T_ICH9:
3592 /* check EECD_EE_AUTORD */
3593 wm_get_auto_rd_done(sc);
3594 break;
3595 case WM_T_ICH10:
3596 case WM_T_PCH:
3597 wm_lan_init_done(sc);
3598 break;
3599 default:
3600 panic("%s: unknown type\n", __func__);
3601 }
3602
3603 /* Check whether EEPROM is present or not */
3604 switch (sc->sc_type) {
3605 case WM_T_82575:
3606 case WM_T_82576:
3607 case WM_T_82580:
3608 case WM_T_ICH8:
3609 case WM_T_ICH9:
3610 if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3611 /* Not found */
3612 sc->sc_flags |= WM_F_EEPROM_INVALID;
3613 if (sc->sc_type == WM_T_82575) /* 82575 only */
3614 wm_reset_init_script_82575(sc);
3615 }
3616 break;
3617 default:
3618 break;
3619 }
3620
3621 /* Clear any pending interrupt events. */
3622 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3623 reg = CSR_READ(sc, WMREG_ICR);
3624
3625 /* reload sc_ctrl */
3626 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3627
3628 /* dummy read from WUC */
3629 if (sc->sc_type == WM_T_PCH)
3630 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3631 /*
3632 * For PCH, this write will make sure that any noise will be detected
3633 * as a CRC error and be dropped rather than show up as a bad packet
3634 	 * to the DMA engine.
3635 */
3636 if (sc->sc_type == WM_T_PCH)
3637 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3638
3639 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3640 CSR_WRITE(sc, WMREG_WUC, 0);
3641
3642 /* XXX need special handling for 82580 */
3643 }
3644
3645 /*
3646 * wm_init: [ifnet interface function]
3647 *
3648 * Initialize the interface. Must be called at splnet().
3649 */
3650 static int
3651 wm_init(struct ifnet *ifp)
3652 {
3653 struct wm_softc *sc = ifp->if_softc;
3654 struct wm_rxsoft *rxs;
3655 int i, error = 0;
3656 uint32_t reg;
3657
3658 /*
3659 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3660 	 * There is a small but measurable benefit to avoiding the adjustment
3661 * of the descriptor so that the headers are aligned, for normal mtu,
3662 * on such platforms. One possibility is that the DMA itself is
3663 * slightly more efficient if the front of the entire packet (instead
3664 * of the front of the headers) is aligned.
3665 *
3666 * Note we must always set align_tweak to 0 if we are using
3667 * jumbo frames.
3668 */
3669 #ifdef __NO_STRICT_ALIGNMENT
3670 sc->sc_align_tweak = 0;
3671 #else
3672 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3673 sc->sc_align_tweak = 0;
3674 else
3675 sc->sc_align_tweak = 2;
3676 #endif /* __NO_STRICT_ALIGNMENT */
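	/*
	 * Worked example of the tweak above: an Ethernet header is 14
	 * bytes, so starting the buffer 2 bytes past a 4-byte boundary
	 * lands the IP header (frame offset 14) back on a 4-byte
	 * boundary, which is what strict-alignment platforms need.
	 */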
3677
3678 /* Cancel any pending I/O. */
3679 wm_stop(ifp, 0);
3680
3681 /* update statistics before reset */
3682 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3683 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3684
3685 /* Reset the chip to a known state. */
3686 wm_reset(sc);
3687
3688 switch (sc->sc_type) {
3689 case WM_T_82571:
3690 case WM_T_82572:
3691 case WM_T_82573:
3692 case WM_T_82574:
3693 case WM_T_82583:
3694 case WM_T_80003:
3695 case WM_T_ICH8:
3696 case WM_T_ICH9:
3697 case WM_T_ICH10:
3698 case WM_T_PCH:
3699 if (wm_check_mng_mode(sc) != 0)
3700 wm_get_hw_control(sc);
3701 break;
3702 default:
3703 break;
3704 }
3705
3706 /* Reset the PHY. */
3707 if (sc->sc_flags & WM_F_HAS_MII)
3708 wm_gmii_reset(sc);
3709
3710 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3711 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3712 if (sc->sc_type == WM_T_PCH)
3713 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
3714
3715 /* Initialize the transmit descriptor ring. */
3716 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3717 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3718 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3719 sc->sc_txfree = WM_NTXDESC(sc);
3720 sc->sc_txnext = 0;
3721
3722 if (sc->sc_type < WM_T_82543) {
3723 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3724 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3725 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3726 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3727 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3728 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3729 } else {
3730 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3731 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3732 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3733 CSR_WRITE(sc, WMREG_TDH, 0);
3734 CSR_WRITE(sc, WMREG_TDT, 0);
3735 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3736 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3737
3738 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
3739 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
3740 | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
3741 | TXDCTL_WTHRESH(0));
3742 else {
3743 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3744 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3745 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3746 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3747 }
3748 }
3749 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3750 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3751
3752 /* Initialize the transmit job descriptors. */
3753 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3754 sc->sc_txsoft[i].txs_mbuf = NULL;
3755 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3756 sc->sc_txsnext = 0;
3757 sc->sc_txsdirty = 0;
3758
3759 /*
3760 * Initialize the receive descriptor and receive job
3761 * descriptor rings.
3762 */
3763 if (sc->sc_type < WM_T_82543) {
3764 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3765 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3766 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3767 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3768 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3769 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3770
3771 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3772 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3773 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3774 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3775 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3776 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3777 } else {
3778 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3779 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3780 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3781 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3782 CSR_WRITE(sc, WMREG_EITR(0), 450);
3783 if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
3784 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
3785 CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
3786 | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
3787 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
3788 | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
3789 | RXDCTL_WTHRESH(1));
3790 } else {
3791 CSR_WRITE(sc, WMREG_RDH, 0);
3792 CSR_WRITE(sc, WMREG_RDT, 0);
3793 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3794 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3795 }
3796 }
3797 for (i = 0; i < WM_NRXDESC; i++) {
3798 rxs = &sc->sc_rxsoft[i];
3799 if (rxs->rxs_mbuf == NULL) {
3800 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3801 log(LOG_ERR, "%s: unable to allocate or map rx "
3802 "buffer %d, error = %d\n",
3803 device_xname(sc->sc_dev), i, error);
3804 /*
3805 * XXX Should attempt to run with fewer receive
3806 * XXX buffers instead of just failing.
3807 */
3808 wm_rxdrain(sc);
3809 goto out;
3810 }
3811 } else {
3812 if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3813 WM_INIT_RXDESC(sc, i);
3814 }
3815 }
3816 sc->sc_rxptr = 0;
3817 sc->sc_rxdiscard = 0;
3818 WM_RXCHAIN_RESET(sc);
3819
3820 /*
3821 * Clear out the VLAN table -- we don't use it (yet).
3822 */
3823 CSR_WRITE(sc, WMREG_VET, 0);
3824 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3825 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3826
3827 /*
3828 * Set up flow-control parameters.
3829 *
3830 * XXX Values could probably stand some tuning.
3831 */
3832 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3833 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3834 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3835 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3836 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3837 }
3838
3839 sc->sc_fcrtl = FCRTL_DFLT;
3840 if (sc->sc_type < WM_T_82543) {
3841 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3842 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3843 } else {
3844 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3845 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3846 }
3847
3848 if (sc->sc_type == WM_T_80003)
3849 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3850 else
3851 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3852
3853 /* Deal with VLAN enables. */
3854 if (VLAN_ATTACHED(&sc->sc_ethercom))
3855 sc->sc_ctrl |= CTRL_VME;
3856 else
3857 sc->sc_ctrl &= ~CTRL_VME;
3858
3859 /* Write the control registers. */
3860 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3861
3862 if (sc->sc_flags & WM_F_HAS_MII) {
3863 int val;
3864
3865 switch (sc->sc_type) {
3866 case WM_T_80003:
3867 case WM_T_ICH8:
3868 case WM_T_ICH9:
3869 case WM_T_ICH10:
3870 case WM_T_PCH:
3871 /*
3872 * Set the mac to wait the maximum time between each
3873 * iteration and increase the max iterations when
3874 * polling the phy; this fixes erroneous timeouts at
3875 * 10Mbps.
3876 */
3877 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3878 0xFFFF);
3879 val = wm_kmrn_readreg(sc,
3880 KUMCTRLSTA_OFFSET_INB_PARAM);
3881 val |= 0x3F;
3882 wm_kmrn_writereg(sc,
3883 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3884 break;
3885 default:
3886 break;
3887 }
3888
3889 if (sc->sc_type == WM_T_80003) {
3890 val = CSR_READ(sc, WMREG_CTRL_EXT);
3891 val &= ~CTRL_EXT_LINK_MODE_MASK;
3892 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3893
3894 /* Bypass RX and TX FIFO's */
3895 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3896 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
3897 | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3898 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3899 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3900 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3901 }
3902 }
3903 #if 0
3904 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3905 #endif
3906
3907 /*
3908 * Set up checksum offload parameters.
3909 */
3910 reg = CSR_READ(sc, WMREG_RXCSUM);
3911 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3912 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3913 reg |= RXCSUM_IPOFL;
3914 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3915 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3916 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3917 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3918 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3919
3920 /* Reset TBI's RXCFG count */
3921 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3922
3923 /*
3924 * Set up the interrupt registers.
3925 */
3926 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3927 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3928 ICR_RXO | ICR_RXT0;
3929 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3930 sc->sc_icr |= ICR_RXCFG;
3931 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3932
3933 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3934 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
3935 reg = CSR_READ(sc, WMREG_KABGTXD);
3936 reg |= KABGTXD_BGSQLBIAS;
3937 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3938 }
3939
3940 /* Set up the inter-packet gap. */
3941 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3942
3943 if (sc->sc_type >= WM_T_82543) {
3944 /*
3945 * Set up the interrupt throttling register (units of 256ns)
3946 * Note that a footnote in Intel's documentation says this
3947 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3948 	 * or 10Mbit mode.  Empirically, it appears to be the case
3949 	 * that this is also true for the 1024ns units of the other
3950 * interrupt-related timer registers -- so, really, we ought
3951 * to divide this value by 4 when the link speed is low.
3952 *
3953 * XXX implement this division at link speed change!
3954 */
3955
3956 /*
3957 * For N interrupts/sec, set this value to:
3958 * 1000000000 / (N * 256). Note that we set the
3959 * absolute and packet timer values to this value
3960 * divided by 4 to get "simple timer" behavior.
3961 */
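		/*
		 * Worked example: sc_itr = 1500 gives
		 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, and
		 * the corresponding "simple timer" value is
		 * 1500 / 4 = 375 -- the constant written to TIDV/TADV
		 * (and, on older chips, RDTR/RADV) earlier in this
		 * function.
		 */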
3962
3963 sc->sc_itr = 1500; /* 2604 ints/sec */
3964 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3965 }
3966
3967 /* Set the VLAN ethernetype. */
3968 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3969
3970 /*
3971 * Set up the transmit control register; we start out with
3972 	 * a collision distance suitable for FDX, but update it when
3973 * we resolve the media type.
3974 */
3975 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3976 | TCTL_CT(TX_COLLISION_THRESHOLD)
3977 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3978 if (sc->sc_type >= WM_T_82571)
3979 sc->sc_tctl |= TCTL_MULR;
3980 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3981
3982 if (sc->sc_type == WM_T_80003) {
3983 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3984 reg &= ~TCTL_EXT_GCEX_MASK;
3985 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3986 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3987 }
3988
3989 /* Set the media. */
3990 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3991 goto out;
3992
3993 /*
3994 * Set up the receive control register; we actually program
3995 * the register when we set the receive filter. Use multicast
3996 * address offset type 0.
3997 *
3998 * Only the i82544 has the ability to strip the incoming
3999 * CRC, so we don't enable that feature.
4000 */
4001 sc->sc_mchash_type = 0;
4002 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4003 | RCTL_MO(sc->sc_mchash_type);
4004
4005 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4006 && (ifp->if_mtu > ETHERMTU)) {
4007 sc->sc_rctl |= RCTL_LPE;
4008 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4009 CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4010 }
4011
4012 if (MCLBYTES == 2048) {
4013 sc->sc_rctl |= RCTL_2k;
4014 } else {
4015 if (sc->sc_type >= WM_T_82543) {
4016 switch (MCLBYTES) {
4017 case 4096:
4018 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4019 break;
4020 case 8192:
4021 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4022 break;
4023 case 16384:
4024 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4025 break;
4026 default:
4027 panic("wm_init: MCLBYTES %d unsupported",
4028 MCLBYTES);
4029 break;
4030 }
4031 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
4032 }
4033
4034 /* Set the receive filter. */
4035 wm_set_filter(sc);
4036
4037 	/* On 82575 and later, set RDT only if RX is enabled... */
4038 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4039 for (i = 0; i < WM_NRXDESC; i++)
4040 WM_INIT_RXDESC(sc, i);
4041
4042 /* Start the one second link check clock. */
4043 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4044
4045 /* ...all done! */
4046 ifp->if_flags |= IFF_RUNNING;
4047 ifp->if_flags &= ~IFF_OACTIVE;
4048
4049 out:
4050 if (error)
4051 log(LOG_ERR, "%s: interface not running\n",
4052 device_xname(sc->sc_dev));
4053 return error;
4054 }
4055
4056 /*
4057 * wm_rxdrain:
4058 *
4059 * Drain the receive queue.
4060 */
4061 static void
4062 wm_rxdrain(struct wm_softc *sc)
4063 {
4064 struct wm_rxsoft *rxs;
4065 int i;
4066
4067 for (i = 0; i < WM_NRXDESC; i++) {
4068 rxs = &sc->sc_rxsoft[i];
4069 if (rxs->rxs_mbuf != NULL) {
4070 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4071 m_freem(rxs->rxs_mbuf);
4072 rxs->rxs_mbuf = NULL;
4073 }
4074 }
4075 }
4076
4077 /*
4078 * wm_stop: [ifnet interface function]
4079 *
4080 * Stop transmission on the interface.
4081 */
4082 static void
4083 wm_stop(struct ifnet *ifp, int disable)
4084 {
4085 struct wm_softc *sc = ifp->if_softc;
4086 struct wm_txsoft *txs;
4087 int i;
4088
4089 /* Stop the one second clock. */
4090 callout_stop(&sc->sc_tick_ch);
4091
4092 /* Stop the 82547 Tx FIFO stall check timer. */
4093 if (sc->sc_type == WM_T_82547)
4094 callout_stop(&sc->sc_txfifo_ch);
4095
4096 if (sc->sc_flags & WM_F_HAS_MII) {
4097 /* Down the MII. */
4098 mii_down(&sc->sc_mii);
4099 } else {
4100 #if 0
4101 /* Should we clear PHY's status properly? */
4102 wm_reset(sc);
4103 #endif
4104 }
4105
4106 /* Stop the transmit and receive processes. */
4107 CSR_WRITE(sc, WMREG_TCTL, 0);
4108 CSR_WRITE(sc, WMREG_RCTL, 0);
4109 sc->sc_rctl &= ~RCTL_EN;
4110
4111 /*
4112 * Clear the interrupt mask to ensure the device cannot assert its
4113 * interrupt line.
4114 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4115 * any currently pending or shared interrupt.
4116 */
4117 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4118 sc->sc_icr = 0;
4119
4120 /* Release any queued transmit buffers. */
4121 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4122 txs = &sc->sc_txsoft[i];
4123 if (txs->txs_mbuf != NULL) {
4124 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4125 m_freem(txs->txs_mbuf);
4126 txs->txs_mbuf = NULL;
4127 }
4128 }
4129
4130 /* Mark the interface as down and cancel the watchdog timer. */
4131 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4132 ifp->if_timer = 0;
4133
4134 if (disable)
4135 wm_rxdrain(sc);
4136
4137 #if 0 /* notyet */
4138 if (sc->sc_type >= WM_T_82544)
4139 CSR_WRITE(sc, WMREG_WUC, 0);
4140 #endif
4141 }
4142
4143 void
4144 wm_get_auto_rd_done(struct wm_softc *sc)
4145 {
4146 int i;
4147
4148 /* wait for eeprom to reload */
4149 switch (sc->sc_type) {
4150 case WM_T_82571:
4151 case WM_T_82572:
4152 case WM_T_82573:
4153 case WM_T_82574:
4154 case WM_T_82583:
4155 case WM_T_82575:
4156 case WM_T_82576:
4157 case WM_T_80003:
4158 case WM_T_ICH8:
4159 case WM_T_ICH9:
4160 for (i = 0; i < 10; i++) {
4161 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4162 break;
4163 delay(1000);
4164 }
4165 if (i == 10) {
4166 log(LOG_ERR, "%s: auto read from eeprom failed to "
4167 "complete\n", device_xname(sc->sc_dev));
4168 }
4169 break;
4170 default:
4171 break;
4172 }
4173 }
4174
4175 void
4176 wm_lan_init_done(struct wm_softc *sc)
4177 {
4178 uint32_t reg = 0;
4179 int i;
4180
4181 	/* Wait for the hardware to signal that the LAN init is done */
4182 switch (sc->sc_type) {
4183 case WM_T_ICH10:
4184 case WM_T_PCH:
4185 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4186 reg = CSR_READ(sc, WMREG_STATUS);
4187 if ((reg & STATUS_LAN_INIT_DONE) != 0)
4188 break;
4189 delay(100);
4190 }
4191 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4192 log(LOG_ERR, "%s: %s: lan_init_done failed to "
4193 "complete\n", device_xname(sc->sc_dev), __func__);
4194 }
4195 break;
4196 default:
4197 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4198 __func__);
4199 break;
4200 }
4201
4202 reg &= ~STATUS_LAN_INIT_DONE;
4203 CSR_WRITE(sc, WMREG_STATUS, reg);
4204 }
4205
4206 void
4207 wm_get_cfg_done(struct wm_softc *sc)
4208 {
4209 int mask;
4210 uint32_t reg;
4211 int i;
4212
4213 	/* Wait for the post-reset PHY/NVM configuration to complete */
4214 switch (sc->sc_type) {
4215 case WM_T_82542_2_0:
4216 case WM_T_82542_2_1:
4217 /* null */
4218 break;
4219 case WM_T_82543:
4220 case WM_T_82544:
4221 case WM_T_82540:
4222 case WM_T_82545:
4223 case WM_T_82545_3:
4224 case WM_T_82546:
4225 case WM_T_82546_3:
4226 case WM_T_82541:
4227 case WM_T_82541_2:
4228 case WM_T_82547:
4229 case WM_T_82547_2:
4230 case WM_T_82573:
4231 case WM_T_82574:
4232 case WM_T_82583:
4233 /* generic */
4234 delay(10*1000);
4235 break;
4236 case WM_T_80003:
4237 case WM_T_82571:
4238 case WM_T_82572:
4239 case WM_T_82575:
4240 case WM_T_82576:
4241 case WM_T_82580:
4242 mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
4243 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
4244 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
4245 break;
4246 delay(1000);
4247 }
4248 if (i >= WM_PHY_CFG_TIMEOUT) {
4249 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
4250 device_xname(sc->sc_dev), __func__));
4251 }
4252 break;
4253 case WM_T_ICH8:
4254 case WM_T_ICH9:
4255 case WM_T_ICH10:
4256 case WM_T_PCH:
4257 if (sc->sc_type >= WM_T_PCH) {
4258 reg = CSR_READ(sc, WMREG_STATUS);
4259 if ((reg & STATUS_PHYRA) != 0)
4260 CSR_WRITE(sc, WMREG_STATUS,
4261 reg & ~STATUS_PHYRA);
4262 }
4263 delay(10*1000);
4264 break;
4265 default:
4266 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4267 __func__);
4268 break;
4269 }
4270 }
4271
4272 /*
4273 * wm_acquire_eeprom:
4274 *
4275 * Perform the EEPROM handshake required on some chips.
4276 */
4277 static int
4278 wm_acquire_eeprom(struct wm_softc *sc)
4279 {
4280 uint32_t reg;
4281 int x;
4282 int ret = 0;
4283
4284 	/* Flash-based NVM needs no handshake; always succeeds */
4285 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4286 return 0;
4287
4288 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
4289 ret = wm_get_swfwhw_semaphore(sc);
4290 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
4291 /* this will also do wm_get_swsm_semaphore() if needed */
4292 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
4293 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4294 ret = wm_get_swsm_semaphore(sc);
4295 }
4296
4297 if (ret) {
4298 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4299 __func__);
4300 return 1;
4301 }
4302
4303 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4304 reg = CSR_READ(sc, WMREG_EECD);
4305
4306 /* Request EEPROM access. */
4307 reg |= EECD_EE_REQ;
4308 CSR_WRITE(sc, WMREG_EECD, reg);
4309
4310 /* ..and wait for it to be granted. */
4311 for (x = 0; x < 1000; x++) {
4312 reg = CSR_READ(sc, WMREG_EECD);
4313 if (reg & EECD_EE_GNT)
4314 break;
4315 delay(5);
4316 }
4317 if ((reg & EECD_EE_GNT) == 0) {
4318 aprint_error_dev(sc->sc_dev,
4319 "could not acquire EEPROM GNT\n");
4320 reg &= ~EECD_EE_REQ;
4321 CSR_WRITE(sc, WMREG_EECD, reg);
4322 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4323 wm_put_swfwhw_semaphore(sc);
4324 if (sc->sc_flags & WM_F_SWFW_SYNC)
4325 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4326 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4327 wm_put_swsm_semaphore(sc);
4328 return 1;
4329 }
4330 }
4331
4332 return 0;
4333 }
4334
4335 /*
4336 * wm_release_eeprom:
4337 *
4338 * Release the EEPROM mutex.
4339 */
4340 static void
4341 wm_release_eeprom(struct wm_softc *sc)
4342 {
4343 uint32_t reg;
4344
4345 	/* Flash-based NVM needs no handshake; nothing to release */
4346 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
4347 return;
4348
4349 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
4350 reg = CSR_READ(sc, WMREG_EECD);
4351 reg &= ~EECD_EE_REQ;
4352 CSR_WRITE(sc, WMREG_EECD, reg);
4353 }
4354
4355 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
4356 wm_put_swfwhw_semaphore(sc);
4357 if (sc->sc_flags & WM_F_SWFW_SYNC)
4358 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
4359 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4360 wm_put_swsm_semaphore(sc);
4361 }
4362
4363 /*
4364 * wm_eeprom_sendbits:
4365 *
4366 * Send a series of bits to the EEPROM.
4367 */
4368 static void
4369 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4370 {
4371 uint32_t reg;
4372 int x;
4373
4374 reg = CSR_READ(sc, WMREG_EECD);
4375
4376 for (x = nbits; x > 0; x--) {
4377 if (bits & (1U << (x - 1)))
4378 reg |= EECD_DI;
4379 else
4380 reg &= ~EECD_DI;
4381 CSR_WRITE(sc, WMREG_EECD, reg);
4382 delay(2);
4383 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4384 delay(2);
4385 CSR_WRITE(sc, WMREG_EECD, reg);
4386 delay(2);
4387 }
4388 }
4389
4390 /*
4391 * wm_eeprom_recvbits:
4392 *
4393 * Receive a series of bits from the EEPROM.
4394 */
4395 static void
4396 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4397 {
4398 uint32_t reg, val;
4399 int x;
4400
4401 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4402
4403 val = 0;
4404 for (x = nbits; x > 0; x--) {
4405 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4406 delay(2);
4407 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4408 val |= (1U << (x - 1));
4409 CSR_WRITE(sc, WMREG_EECD, reg);
4410 delay(2);
4411 }
4412 *valp = val;
4413 }
4414
4415 /*
4416 * wm_read_eeprom_uwire:
4417 *
4418 * Read a word from the EEPROM using the MicroWire protocol.
4419 */
4420 static int
4421 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4422 {
4423 uint32_t reg, val;
4424 int i;
4425
4426 for (i = 0; i < wordcnt; i++) {
4427 /* Clear SK and DI. */
4428 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4429 CSR_WRITE(sc, WMREG_EECD, reg);
4430
4431 /* Set CHIP SELECT. */
4432 reg |= EECD_CS;
4433 CSR_WRITE(sc, WMREG_EECD, reg);
4434 delay(2);
4435
4436 /* Shift in the READ command. */
4437 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4438
4439 /* Shift in address. */
4440 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4441
4442 /* Shift out the data. */
4443 wm_eeprom_recvbits(sc, &val, 16);
4444 data[i] = val & 0xffff;
4445
4446 /* Clear CHIP SELECT. */
4447 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4448 CSR_WRITE(sc, WMREG_EECD, reg);
4449 delay(2);
4450 }
4451
4452 return 0;
4453 }
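/*
 * For reference, one full MicroWire READ transaction as driven above
 * looks like this on the wire (assuming the conventional 93Cxx READ
 * opcode of binary 110 for UWIRE_OPC_READ):
 *
 *	raise CS; clock out 1 1 0, then the address (sc_ee_addrbits
 *	bits, MSB first); clock in 16 data bits, MSB first; drop CS.
 *
 * wm_eeprom_sendbits() and wm_eeprom_recvbits() implement the per-bit
 * EECD_SK toggling with ~2us of setup and hold around each edge.
 */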
4454
4455 /*
4456 * wm_spi_eeprom_ready:
4457 *
4458 * Wait for a SPI EEPROM to be ready for commands.
4459 */
4460 static int
4461 wm_spi_eeprom_ready(struct wm_softc *sc)
4462 {
4463 uint32_t val;
4464 int usec;
4465
4466 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4467 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4468 wm_eeprom_recvbits(sc, &val, 8);
4469 if ((val & SPI_SR_RDY) == 0)
4470 break;
4471 }
4472 if (usec >= SPI_MAX_RETRIES) {
4473 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4474 return 1;
4475 }
4476 return 0;
4477 }
4478
4479 /*
4480 * wm_read_eeprom_spi:
4481 *
4482  *	Read a word from the EEPROM using the SPI protocol.
4483 */
4484 static int
4485 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4486 {
4487 uint32_t reg, val;
4488 int i;
4489 uint8_t opc;
4490
4491 /* Clear SK and CS. */
4492 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4493 CSR_WRITE(sc, WMREG_EECD, reg);
4494 delay(2);
4495
4496 if (wm_spi_eeprom_ready(sc))
4497 return 1;
4498
4499 /* Toggle CS to flush commands. */
4500 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4501 delay(2);
4502 CSR_WRITE(sc, WMREG_EECD, reg);
4503 delay(2);
4504
4505 opc = SPI_OPC_READ;
4506 if (sc->sc_ee_addrbits == 8 && word >= 128)
4507 opc |= SPI_OPC_A8;
4508
4509 wm_eeprom_sendbits(sc, opc, 8);
4510 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4511
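	/*
	 * Each word is shifted out of the part high byte first; the swap
	 * in the loop below restores host 16-bit word order.
	 */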
4512 for (i = 0; i < wordcnt; i++) {
4513 wm_eeprom_recvbits(sc, &val, 16);
4514 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4515 }
4516
4517 /* Raise CS and clear SK. */
4518 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4519 CSR_WRITE(sc, WMREG_EECD, reg);
4520 delay(2);
4521
4522 return 0;
4523 }
4524
4525 #define EEPROM_CHECKSUM 0xBABA
4526 #define EEPROM_SIZE 0x0040
4527
4528 /*
4529 * wm_validate_eeprom_checksum
4530 *
4531 * The checksum is defined as the sum of the first 64 (16 bit) words.
4532 */
4533 static int
4534 wm_validate_eeprom_checksum(struct wm_softc *sc)
4535 {
4536 uint16_t checksum;
4537 uint16_t eeprom_data;
4538 int i;
4539
4540 checksum = 0;
4541
4542 for (i = 0; i < EEPROM_SIZE; i++) {
4543 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4544 return 1;
4545 checksum += eeprom_data;
4546 }
4547
4548 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4549 return 1;
4550
4551 return 0;
4552 }
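/*
 * A minimal sketch of the complementary operation (not used by this
 * driver, which only validates): when rewriting the NVM, the checksum
 * word is chosen so that the 64-word sum comes out to EEPROM_CHECKSUM.
 * The assumption that the checksum occupies the last of the 64 words
 * follows Intel's documented NVM layout.
 */
#if 0
static uint16_t
wm_compute_eeprom_checksum(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	/* Sum every word except the checksum word itself. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];

	return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif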
4553
4554 /*
4555 * wm_read_eeprom:
4556 *
4557 * Read data from the serial EEPROM.
4558 */
4559 static int
4560 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4561 {
4562 int rv;
4563
4564 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4565 return 1;
4566
4567 if (wm_acquire_eeprom(sc))
4568 return 1;
4569
4570 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4571 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4572 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4573 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4574 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4575 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4576 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4577 else
4578 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4579
4580 wm_release_eeprom(sc);
4581 return rv;
4582 }
4583
4584 static int
4585 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4586 uint16_t *data)
4587 {
4588 int i, eerd = 0;
4589 int error = 0;
4590
4591 for (i = 0; i < wordcnt; i++) {
4592 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4593
4594 CSR_WRITE(sc, WMREG_EERD, eerd);
4595 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4596 if (error != 0)
4597 break;
4598
4599 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4600 }
4601
4602 return error;
4603 }
4604
4605 static int
4606 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4607 {
4608 uint32_t attempts = 100000;
4609 uint32_t i, reg = 0;
4610 int32_t done = -1;
4611
4612 for (i = 0; i < attempts; i++) {
4613 reg = CSR_READ(sc, rw);
4614
4615 if (reg & EERD_DONE) {
4616 done = 0;
4617 break;
4618 }
4619 delay(5);
4620 }
4621
4622 return done;
4623 }
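/*
 * Note on the bound above: 100000 attempts with a 5us pause each caps
 * the busy-wait at roughly half a second before giving up and
 * returning -1.
 */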
4624
4625 /*
4626 * wm_add_rxbuf:
4627 *
4628  *	Add a receive buffer to the indicated descriptor.
4629 */
4630 static int
4631 wm_add_rxbuf(struct wm_softc *sc, int idx)
4632 {
4633 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4634 struct mbuf *m;
4635 int error;
4636
4637 MGETHDR(m, M_DONTWAIT, MT_DATA);
4638 if (m == NULL)
4639 return ENOBUFS;
4640
4641 MCLGET(m, M_DONTWAIT);
4642 if ((m->m_flags & M_EXT) == 0) {
4643 m_freem(m);
4644 return ENOBUFS;
4645 }
4646
4647 if (rxs->rxs_mbuf != NULL)
4648 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4649
4650 rxs->rxs_mbuf = m;
4651
4652 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4653 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4654 BUS_DMA_READ|BUS_DMA_NOWAIT);
4655 if (error) {
4656 /* XXX XXX XXX */
4657 aprint_error_dev(sc->sc_dev,
4658 "unable to load rx DMA map %d, error = %d\n",
4659 idx, error);
4660 panic("wm_add_rxbuf");
4661 }
4662
4663 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4664 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4665
4666 if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4667 if ((sc->sc_rctl & RCTL_EN) != 0)
4668 WM_INIT_RXDESC(sc, idx);
4669 } else
4670 WM_INIT_RXDESC(sc, idx);
4671
4672 return 0;
4673 }
4674
4675 /*
4676 * wm_set_ral:
4677 *
4678  *	Set an entry in the receive address list.
4679 */
4680 static void
4681 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4682 {
4683 uint32_t ral_lo, ral_hi;
4684
4685 if (enaddr != NULL) {
4686 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4687 (enaddr[3] << 24);
4688 ral_hi = enaddr[4] | (enaddr[5] << 8);
4689 ral_hi |= RAL_AV;
4690 } else {
4691 ral_lo = 0;
4692 ral_hi = 0;
4693 }
4694
4695 if (sc->sc_type >= WM_T_82544) {
4696 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4697 ral_lo);
4698 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4699 ral_hi);
4700 } else {
4701 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4702 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4703 }
4704 }
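/*
 * Worked example of the packing above, using the made-up address
 * 00:11:22:33:44:55: ral_lo = 0x33221100 and
 * ral_hi = 0x00005544 | RAL_AV, i.e. the address is laid out
 * little-endian across the register pair, with the address-valid bit
 * carried in the high word.
 */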
4705
4706 /*
4707 * wm_mchash:
4708 *
4709  *	Compute the hash of the multicast address for the 4096-bit
4710  *	multicast filter (1024-bit on ICH and PCH chips).
4711 */
4712 static uint32_t
4713 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4714 {
4715 static const int lo_shift[4] = { 4, 3, 2, 0 };
4716 static const int hi_shift[4] = { 4, 5, 6, 8 };
4717 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4718 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4719 uint32_t hash;
4720
4721 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4722 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4723 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4724 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4725 return (hash & 0x3ff);
4726 }
4727 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4728 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4729
4730 return (hash & 0xfff);
4731 }
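/*
 * Worked example (hypothetical address): for the IPv4 all-hosts
 * multicast MAC 01:00:5e:00:00:01 on a non-ICH chip with
 * sc_mchash_type == 0, enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010.  wm_set_filter() below
 * then uses hash bits [11:5] (here 0) to pick the MTA word and bits
 * [4:0] (here 0x10) to pick the bit within that word.
 */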
4732
4733 /*
4734 * wm_set_filter:
4735 *
4736 * Set up the receive filter.
4737 */
4738 static void
4739 wm_set_filter(struct wm_softc *sc)
4740 {
4741 struct ethercom *ec = &sc->sc_ethercom;
4742 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4743 struct ether_multi *enm;
4744 struct ether_multistep step;
4745 bus_addr_t mta_reg;
4746 uint32_t hash, reg, bit;
4747 int i, size;
4748
4749 if (sc->sc_type >= WM_T_82544)
4750 mta_reg = WMREG_CORDOVA_MTA;
4751 else
4752 mta_reg = WMREG_MTA;
4753
4754 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4755
4756 if (ifp->if_flags & IFF_BROADCAST)
4757 sc->sc_rctl |= RCTL_BAM;
4758 if (ifp->if_flags & IFF_PROMISC) {
4759 sc->sc_rctl |= RCTL_UPE;
4760 goto allmulti;
4761 }
4762
4763 /*
4764 * Set the station address in the first RAL slot, and
4765 * clear the remaining slots.
4766 */
4767 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4768 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4769 size = WM_ICH8_RAL_TABSIZE;
4770 else
4771 size = WM_RAL_TABSIZE;
4772 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4773 for (i = 1; i < size; i++)
4774 wm_set_ral(sc, NULL, i);
4775
4776 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4777 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4778 size = WM_ICH8_MC_TABSIZE;
4779 else
4780 size = WM_MC_TABSIZE;
4781 /* Clear out the multicast table. */
4782 for (i = 0; i < size; i++)
4783 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4784
4785 ETHER_FIRST_MULTI(step, ec, enm);
4786 while (enm != NULL) {
4787 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4788 /*
4789 * We must listen to a range of multicast addresses.
4790 * For now, just accept all multicasts, rather than
4791 * trying to set only those filter bits needed to match
4792 * the range. (At this time, the only use of address
4793 * ranges is for IP multicast routing, for which the
4794 * range is big enough to require all bits set.)
4795 */
4796 goto allmulti;
4797 }
4798
4799 hash = wm_mchash(sc, enm->enm_addrlo);
4800
4801 reg = (hash >> 5);
4802 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4803 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4804 reg &= 0x1f;
4805 else
4806 reg &= 0x7f;
4807 bit = hash & 0x1f;
4808
4809 hash = CSR_READ(sc, mta_reg + (reg << 2));
4810 hash |= 1U << bit;
4811
4812 /* XXX Hardware bug?? */
4813 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4814 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4815 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4816 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4817 } else
4818 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4819
4820 ETHER_NEXT_MULTI(step, enm);
4821 }
4822
4823 ifp->if_flags &= ~IFF_ALLMULTI;
4824 goto setit;
4825
4826 allmulti:
4827 ifp->if_flags |= IFF_ALLMULTI;
4828 sc->sc_rctl |= RCTL_MPE;
4829
4830 setit:
4831 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4832 }
4833
4834 /*
4835 * wm_tbi_mediainit:
4836 *
4837 * Initialize media for use on 1000BASE-X devices.
4838 */
4839 static void
4840 wm_tbi_mediainit(struct wm_softc *sc)
4841 {
4842 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4843 const char *sep = "";
4844
4845 if (sc->sc_type < WM_T_82543)
4846 sc->sc_tipg = TIPG_WM_DFLT;
4847 else
4848 sc->sc_tipg = TIPG_LG_DFLT;
4849
4850 sc->sc_tbi_anegticks = 5;
4851
4852 /* Initialize our media structures */
4853 sc->sc_mii.mii_ifp = ifp;
4854
4855 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4856 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4857 wm_tbi_mediastatus);
4858
4859 /*
4860 * SWD Pins:
4861 *
4862 * 0 = Link LED (output)
4863 * 1 = Loss Of Signal (input)
4864 */
4865 sc->sc_ctrl |= CTRL_SWDPIO(0);
4866 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4867
4868 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4869
4870 #define ADD(ss, mm, dd) \
4871 do { \
4872 aprint_normal("%s%s", sep, ss); \
4873 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4874 sep = ", "; \
4875 } while (/*CONSTCOND*/0)
4876
4877 aprint_normal_dev(sc->sc_dev, "");
4878 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4879 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4880 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4881 aprint_normal("\n");
4882
4883 #undef ADD
4884
4885 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
4886 }
4887
4888 /*
4889 * wm_tbi_mediastatus: [ifmedia interface function]
4890 *
4891 * Get the current interface media status on a 1000BASE-X device.
4892 */
4893 static void
4894 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4895 {
4896 struct wm_softc *sc = ifp->if_softc;
4897 uint32_t ctrl, status;
4898
4899 ifmr->ifm_status = IFM_AVALID;
4900 ifmr->ifm_active = IFM_ETHER;
4901
4902 status = CSR_READ(sc, WMREG_STATUS);
4903 if ((status & STATUS_LU) == 0) {
4904 ifmr->ifm_active |= IFM_NONE;
4905 return;
4906 }
4907
4908 ifmr->ifm_status |= IFM_ACTIVE;
4909 ifmr->ifm_active |= IFM_1000_SX;
4910 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4911 ifmr->ifm_active |= IFM_FDX;
4912 ctrl = CSR_READ(sc, WMREG_CTRL);
4913 if (ctrl & CTRL_RFCE)
4914 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4915 if (ctrl & CTRL_TFCE)
4916 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4917 }
4918
4919 /*
4920 * wm_tbi_mediachange: [ifmedia interface function]
4921 *
4922 * Set hardware to newly-selected media on a 1000BASE-X device.
4923 */
4924 static int
4925 wm_tbi_mediachange(struct ifnet *ifp)
4926 {
4927 struct wm_softc *sc = ifp->if_softc;
4928 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4929 uint32_t status;
4930 int i;
4931
4932 sc->sc_txcw = 0;
4933 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4934 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4935 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4936 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4937 sc->sc_txcw |= TXCW_ANE;
4938 } else {
4939 /*
4940 * If autonegotiation is turned off, force link up and turn on
4941 * full duplex
4942 */
4943 sc->sc_txcw &= ~TXCW_ANE;
4944 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4945 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4946 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4947 delay(1000);
4948 }
4949
4950 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4951 device_xname(sc->sc_dev),sc->sc_txcw));
4952 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4953 delay(10000);
4954
4955 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4956 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4957
4958 /*
4959 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
4960 	 * if the optics detect a signal, 0 if they don't.
4961 */
4962 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4963 /* Have signal; wait for the link to come up. */
4964
4965 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4966 /*
4967 * Reset the link, and let autonegotiation do its thing
4968 */
4969 sc->sc_ctrl |= CTRL_LRST;
4970 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4971 delay(1000);
4972 sc->sc_ctrl &= ~CTRL_LRST;
4973 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4974 delay(1000);
4975 }
4976
4977 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4978 delay(10000);
4979 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4980 break;
4981 }
4982
4983 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4984 device_xname(sc->sc_dev),i));
4985
4986 status = CSR_READ(sc, WMREG_STATUS);
4987 DPRINTF(WM_DEBUG_LINK,
4988 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4989 device_xname(sc->sc_dev),status, STATUS_LU));
4990 if (status & STATUS_LU) {
4991 /* Link is up. */
4992 DPRINTF(WM_DEBUG_LINK,
4993 ("%s: LINK: set media -> link up %s\n",
4994 device_xname(sc->sc_dev),
4995 (status & STATUS_FD) ? "FDX" : "HDX"));
4996
4997 /*
4998 * NOTE: CTRL will update TFCE and RFCE automatically,
4999 * so we should update sc->sc_ctrl
5000 */
5001 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5002 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5003 sc->sc_fcrtl &= ~FCRTL_XONE;
5004 if (status & STATUS_FD)
5005 sc->sc_tctl |=
5006 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5007 else
5008 sc->sc_tctl |=
5009 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5010 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5011 sc->sc_fcrtl |= FCRTL_XONE;
5012 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5013 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5014 WMREG_OLD_FCRTL : WMREG_FCRTL,
5015 sc->sc_fcrtl);
5016 sc->sc_tbi_linkup = 1;
5017 } else {
5018 if (i == WM_LINKUP_TIMEOUT)
5019 wm_check_for_link(sc);
5020 /* Link is down. */
5021 DPRINTF(WM_DEBUG_LINK,
5022 ("%s: LINK: set media -> link down\n",
5023 device_xname(sc->sc_dev)));
5024 sc->sc_tbi_linkup = 0;
5025 }
5026 } else {
5027 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5028 device_xname(sc->sc_dev)));
5029 sc->sc_tbi_linkup = 0;
5030 }
5031
5032 wm_tbi_set_linkled(sc);
5033
5034 return 0;
5035 }
5036
5037 /*
5038 * wm_tbi_set_linkled:
5039 *
5040 * Update the link LED on 1000BASE-X devices.
5041 */
5042 static void
5043 wm_tbi_set_linkled(struct wm_softc *sc)
5044 {
5045
5046 if (sc->sc_tbi_linkup)
5047 sc->sc_ctrl |= CTRL_SWDPIN(0);
5048 else
5049 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5050
5051 /* 82540 or newer devices are active low */
5052 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5053
5054 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5055 }
5056
5057 /*
5058 * wm_tbi_check_link:
5059 *
5060 * Check the link on 1000BASE-X devices.
5061 */
5062 static void
5063 wm_tbi_check_link(struct wm_softc *sc)
5064 {
5065 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5066 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5067 uint32_t rxcw, ctrl, status;
5068
5069 status = CSR_READ(sc, WMREG_STATUS);
5070
5071 rxcw = CSR_READ(sc, WMREG_RXCW);
5072 ctrl = CSR_READ(sc, WMREG_CTRL);
5073
5074 /* set link status */
5075 if ((status & STATUS_LU) == 0) {
5076 DPRINTF(WM_DEBUG_LINK,
5077 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
5078 sc->sc_tbi_linkup = 0;
5079 } else if (sc->sc_tbi_linkup == 0) {
5080 DPRINTF(WM_DEBUG_LINK,
5081 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
5082 (status & STATUS_FD) ? "FDX" : "HDX"));
5083 sc->sc_tbi_linkup = 1;
5084 }
5085
5086 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
5087 && ((status & STATUS_LU) == 0)) {
5088 sc->sc_tbi_linkup = 0;
5089 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
5090 /* RXCFG storm! */
5091 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
5092 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
5093 wm_init(ifp);
5094 wm_start(ifp);
5095 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5096 /* If the timer expired, retry autonegotiation */
5097 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
5098 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
5099 sc->sc_tbi_ticks = 0;
5100 /*
5101 * Reset the link, and let autonegotiation do
5102 * its thing
5103 */
5104 sc->sc_ctrl |= CTRL_LRST;
5105 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5106 delay(1000);
5107 sc->sc_ctrl &= ~CTRL_LRST;
5108 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5109 delay(1000);
5110 CSR_WRITE(sc, WMREG_TXCW,
5111 sc->sc_txcw & ~TXCW_ANE);
5112 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5113 }
5114 }
5115 }
5116
5117 wm_tbi_set_linkled(sc);
5118 }
5119
5120 /*
5121 * wm_gmii_reset:
5122 *
5123 * Reset the PHY.
5124 */
5125 static void
5126 wm_gmii_reset(struct wm_softc *sc)
5127 {
5128 uint32_t reg;
5129 int rv;
5130
5131 /* get phy semaphore */
5132 switch (sc->sc_type) {
5133 case WM_T_82571:
5134 case WM_T_82572:
5135 case WM_T_82573:
5136 case WM_T_82574:
5137 case WM_T_82583:
5138 /* XXX should get sw semaphore, too */
5139 rv = wm_get_swsm_semaphore(sc);
5140 break;
5141 case WM_T_82575:
5142 case WM_T_82576:
5143 case WM_T_82580:
5144 case WM_T_82580ER:
5145 case WM_T_80003:
5146 rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5147 break;
5148 case WM_T_ICH8:
5149 case WM_T_ICH9:
5150 case WM_T_ICH10:
5151 case WM_T_PCH:
5152 rv = wm_get_swfwhw_semaphore(sc);
5153 break;
5154 default:
5155 		/* nothing to do */
5156 rv = 0;
5157 break;
5158 }
5159 if (rv != 0) {
5160 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5161 __func__);
5162 return;
5163 }
5164
5165 switch (sc->sc_type) {
5166 case WM_T_82542_2_0:
5167 case WM_T_82542_2_1:
5168 /* null */
5169 break;
5170 case WM_T_82543:
5171 /*
5172 		 * With the 82543, we need to force the MAC's speed and duplex
5173 		 * to match the PHY's speed and duplex configuration.  In
5174 		 * addition, we need to toggle the PHY's hardware reset pin
5175 		 * to take the PHY out of reset.
5176 */
5177 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5178 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5179
5180 /* The PHY reset pin is active-low. */
5181 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5182 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
5183 CTRL_EXT_SWDPIN(4));
5184 reg |= CTRL_EXT_SWDPIO(4);
5185
5186 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5187 delay(10*1000);
5188
5189 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
5190 delay(150);
5191 #if 0
5192 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
5193 #endif
5194 delay(20*1000); /* XXX extra delay to get PHY ID? */
5195 break;
5196 case WM_T_82544: /* reset 10000us */
5197 case WM_T_82540:
5198 case WM_T_82545:
5199 case WM_T_82545_3:
5200 case WM_T_82546:
5201 case WM_T_82546_3:
5202 case WM_T_82541:
5203 case WM_T_82541_2:
5204 case WM_T_82547:
5205 case WM_T_82547_2:
5206 case WM_T_82571: /* reset 100us */
5207 case WM_T_82572:
5208 case WM_T_82573:
5209 case WM_T_82574:
5210 case WM_T_82575:
5211 case WM_T_82576:
5212 case WM_T_82580:
5213 case WM_T_82580ER:
5214 case WM_T_82583:
5215 case WM_T_80003:
5216 /* generic reset */
5217 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5218 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
5219 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5220 delay(150);
5221
5222 if ((sc->sc_type == WM_T_82541)
5223 || (sc->sc_type == WM_T_82541_2)
5224 || (sc->sc_type == WM_T_82547)
5225 || (sc->sc_type == WM_T_82547_2)) {
5226 			/* workarounds for igp are done in igp_reset() */
5227 /* XXX add code to set LED after phy reset */
5228 }
5229 break;
5230 case WM_T_ICH8:
5231 case WM_T_ICH9:
5232 case WM_T_ICH10:
5233 case WM_T_PCH:
5234 /* generic reset */
5235 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
5236 delay(100);
5237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5238 delay(150);
5239 break;
5240 default:
5241 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5242 __func__);
5243 break;
5244 }
5245
5246 /* release PHY semaphore */
5247 switch (sc->sc_type) {
5248 case WM_T_82571:
5249 case WM_T_82572:
5250 case WM_T_82573:
5251 case WM_T_82574:
5252 case WM_T_82583:
5253 		/* XXX should put sw semaphore, too */
5254 wm_put_swsm_semaphore(sc);
5255 break;
5256 case WM_T_82575:
5257 case WM_T_82576:
5258 case WM_T_82580:
5259 case WM_T_82580ER:
5260 case WM_T_80003:
5261 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
5262 break;
5263 case WM_T_ICH8:
5264 case WM_T_ICH9:
5265 case WM_T_ICH10:
5266 case WM_T_PCH:
5267 wm_put_swfwhw_semaphore(sc);
5268 break;
5269 default:
5270 		/* nothing to do */
5272 break;
5273 }
5274
5275 /* get_cfg_done */
5276 wm_get_cfg_done(sc);
5277
5278 /* extra setup */
5279 switch (sc->sc_type) {
5280 case WM_T_82542_2_0:
5281 case WM_T_82542_2_1:
5282 case WM_T_82543:
5283 case WM_T_82544:
5284 case WM_T_82540:
5285 case WM_T_82545:
5286 case WM_T_82545_3:
5287 case WM_T_82546:
5288 case WM_T_82546_3:
5289 case WM_T_82541_2:
5290 case WM_T_82547_2:
5291 case WM_T_82571:
5292 case WM_T_82572:
5293 case WM_T_82573:
5294 case WM_T_82574:
5295 case WM_T_82575:
5296 case WM_T_82576:
5297 case WM_T_82580:
5298 case WM_T_82580ER:
5299 case WM_T_82583:
5300 case WM_T_80003:
5301 /* null */
5302 break;
5303 case WM_T_82541:
5304 case WM_T_82547:
5305 		/* XXX Configure the activity LED after PHY reset */
5306 break;
5307 case WM_T_ICH8:
5308 case WM_T_ICH9:
5309 case WM_T_ICH10:
5310 case WM_T_PCH:
5311 		/* Allow time for h/w to get to a quiescent state after reset */
5312 delay(10*1000);
5313
5314 if (sc->sc_type == WM_T_PCH) {
5315 wm_hv_phy_workaround_ich8lan(sc);
5316
5317 /*
5318 * dummy read to clear the phy wakeup bit after lcd
5319 * reset
5320 */
5321 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
5322 }
5323
5324 /*
5325 		 * XXX Configure the LCD with the extended configuration region
5326 * in NVM
5327 */
5328
5329 /* Configure the LCD with the OEM bits in NVM */
5330 if (sc->sc_type == WM_T_PCH) {
5331 /*
5332 * Disable LPLU.
5333 * XXX It seems that 82567 has LPLU, too.
5334 */
5335 reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
5336 reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
5337 reg |= HV_OEM_BITS_ANEGNOW;
5338 wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
5339 }
5340 break;
5341 default:
5342 panic("%s: unknown type\n", __func__);
5343 break;
5344 }
5345 }
5346
5347 /*
5348 * wm_gmii_mediainit:
5349 *
5350 * Initialize media for use on 1000BASE-T devices.
5351 */
5352 static void
5353 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
5354 {
5355 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5356
5357 /* We have MII. */
5358 sc->sc_flags |= WM_F_HAS_MII;
5359
5360 if (sc->sc_type == WM_T_80003)
5361 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5362 else
5363 sc->sc_tipg = TIPG_1000T_DFLT;
5364
5365 /*
5366 * Let the chip set speed/duplex on its own based on
5367 * signals from the PHY.
5368 * XXXbouyer - I'm not sure this is right for the 80003,
5369 * the em driver only sets CTRL_SLU here - but it seems to work.
5370 */
5371 sc->sc_ctrl |= CTRL_SLU;
5372 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5373
5374 /* Initialize our media structures and probe the GMII. */
5375 sc->sc_mii.mii_ifp = ifp;
5376
5377 switch (prodid) {
5378 case PCI_PRODUCT_INTEL_PCH_M_LM:
5379 case PCI_PRODUCT_INTEL_PCH_M_LC:
5380 /* 82577 */
5381 sc->sc_phytype = WMPHY_82577;
5382 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5383 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5384 break;
5385 case PCI_PRODUCT_INTEL_PCH_D_DM:
5386 case PCI_PRODUCT_INTEL_PCH_D_DC:
5387 /* 82578 */
5388 sc->sc_phytype = WMPHY_82578;
5389 sc->sc_mii.mii_readreg = wm_gmii_hv_readreg;
5390 sc->sc_mii.mii_writereg = wm_gmii_hv_writereg;
5391 break;
5392 case PCI_PRODUCT_INTEL_82801I_BM:
5393 case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
5394 case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
5395 case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
5396 case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
5397 case PCI_PRODUCT_INTEL_82801J_R_BM_V:
5398 /* 82567 */
5399 sc->sc_phytype = WMPHY_BM;
5400 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5401 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5402 break;
5403 default:
5404 if ((sc->sc_flags & WM_F_SGMII) != 0) {
5405 sc->sc_mii.mii_readreg = wm_sgmii_readreg;
5406 sc->sc_mii.mii_writereg = wm_sgmii_writereg;
5407 } else if (sc->sc_type >= WM_T_80003) {
5408 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5409 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5410 } else if (sc->sc_type >= WM_T_82544) {
5411 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
5412 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
5413 } else {
5414 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
5415 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
5416 }
5417 break;
5418 }
5419 sc->sc_mii.mii_statchg = wm_gmii_statchg;
5420
5421 wm_gmii_reset(sc);
5422
5423 sc->sc_ethercom.ec_mii = &sc->sc_mii;
5424 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
5425 wm_gmii_mediastatus);
5426
5427 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5428 MII_OFFSET_ANY, MIIF_DOPAUSE);
5429
5430 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5431 /* if failed, retry with *_bm_* */
5432 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5433 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5434
5435 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5436 MII_OFFSET_ANY, MIIF_DOPAUSE);
5437 }
5438 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5439 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5440 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5441 sc->sc_phytype = WMPHY_NONE;
5442 } else {
5443 /* Check PHY type */
5444 uint32_t model;
5445 struct mii_softc *child;
5446
5447 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5448 if (device_is_a(child->mii_dev, "igphy")) {
5449 struct igphy_softc *isc = (struct igphy_softc *)child;
5450
5451 model = isc->sc_mii.mii_mpd_model;
5452 if (model == MII_MODEL_yyINTEL_I82566)
5453 sc->sc_phytype = WMPHY_IGP_3;
5454 }
5455
5456 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5457 }
5458 }
5459
5460 /*
5461 * wm_gmii_mediastatus: [ifmedia interface function]
5462 *
5463 * Get the current interface media status on a 1000BASE-T device.
5464 */
5465 static void
5466 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5467 {
5468 struct wm_softc *sc = ifp->if_softc;
5469
5470 ether_mediastatus(ifp, ifmr);
5471 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
5472 | sc->sc_flowflags;
5473 }
5474
5475 /*
5476 * wm_gmii_mediachange: [ifmedia interface function]
5477 *
5478 * Set hardware to newly-selected media on a 1000BASE-T device.
5479 */
5480 static int
5481 wm_gmii_mediachange(struct ifnet *ifp)
5482 {
5483 struct wm_softc *sc = ifp->if_softc;
5484 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5485 int rc;
5486
5487 if ((ifp->if_flags & IFF_UP) == 0)
5488 return 0;
5489
5490 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5491 sc->sc_ctrl |= CTRL_SLU;
5492 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5493 || (sc->sc_type > WM_T_82543)) {
5494 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5495 } else {
5496 sc->sc_ctrl &= ~CTRL_ASDE;
5497 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5498 if (ife->ifm_media & IFM_FDX)
5499 sc->sc_ctrl |= CTRL_FD;
5500 switch (IFM_SUBTYPE(ife->ifm_media)) {
5501 case IFM_10_T:
5502 sc->sc_ctrl |= CTRL_SPEED_10;
5503 break;
5504 case IFM_100_TX:
5505 sc->sc_ctrl |= CTRL_SPEED_100;
5506 break;
5507 case IFM_1000_T:
5508 sc->sc_ctrl |= CTRL_SPEED_1000;
5509 break;
5510 default:
5511 panic("wm_gmii_mediachange: bad media 0x%x",
5512 ife->ifm_media);
5513 }
5514 }
5515 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5516 if (sc->sc_type <= WM_T_82543)
5517 wm_gmii_reset(sc);
5518
5519 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5520 return 0;
5521 return rc;
5522 }
5523
5524 #define MDI_IO CTRL_SWDPIN(2)
5525 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5526 #define MDI_CLK CTRL_SWDPIN(3)
5527
5528 static void
5529 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5530 {
5531 uint32_t i, v;
5532
5533 v = CSR_READ(sc, WMREG_CTRL);
5534 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5535 v |= MDI_DIR | CTRL_SWDPIO(3);
5536
5537 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5538 if (data & i)
5539 v |= MDI_IO;
5540 else
5541 v &= ~MDI_IO;
5542 CSR_WRITE(sc, WMREG_CTRL, v);
5543 delay(10);
5544 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5545 delay(10);
5546 CSR_WRITE(sc, WMREG_CTRL, v);
5547 delay(10);
5548 }
5549 }
5550
5551 static uint32_t
5552 i82543_mii_recvbits(struct wm_softc *sc)
5553 {
5554 uint32_t v, i, data = 0;
5555
5556 v = CSR_READ(sc, WMREG_CTRL);
5557 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5558 v |= CTRL_SWDPIO(3);
5559
5560 CSR_WRITE(sc, WMREG_CTRL, v);
5561 delay(10);
5562 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5563 delay(10);
5564 CSR_WRITE(sc, WMREG_CTRL, v);
5565 delay(10);
5566
5567 for (i = 0; i < 16; i++) {
5568 data <<= 1;
5569 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5570 delay(10);
5571 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5572 data |= 1;
5573 CSR_WRITE(sc, WMREG_CTRL, v);
5574 delay(10);
5575 }
5576
5577 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5578 delay(10);
5579 CSR_WRITE(sc, WMREG_CTRL, v);
5580 delay(10);
5581
5582 return data;
5583 }
5584
5585 #undef MDI_IO
5586 #undef MDI_DIR
5587 #undef MDI_CLK
5588
5589 /*
5590 * wm_gmii_i82543_readreg: [mii interface function]
5591 *
5592 * Read a PHY register on the GMII (i82543 version).
5593 */
5594 static int
5595 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5596 {
5597 struct wm_softc *sc = device_private(self);
5598 int rv;
5599
5600 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5601 i82543_mii_sendbits(sc, reg | (phy << 5) |
5602 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5603 rv = i82543_mii_recvbits(sc) & 0xffff;
5604
5605 DPRINTF(WM_DEBUG_GMII,
5606 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5607 device_xname(sc->sc_dev), phy, reg, rv));
5608
5609 return rv;
5610 }
5611
5612 /*
5613 * wm_gmii_i82543_writereg: [mii interface function]
5614 *
5615 * Write a PHY register on the GMII (i82543 version).
5616 */
5617 static void
5618 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5619 {
5620 struct wm_softc *sc = device_private(self);
5621
5622 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5623 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5624 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5625 (MII_COMMAND_START << 30), 32);
5626 }
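/*
 * For reference, the two bit-banged frames composed above follow the
 * IEEE 802.3 clause 22 management frame format: a preamble of 32 ones,
 * then ST (01), OP (10 = read, 01 = write), a 5-bit PHY address, a
 * 5-bit register address, a turnaround, and 16 data bits.  The shift
 * counts used above simply place each field at its position within
 * that frame.
 */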
5627
5628 /*
5629 * wm_gmii_i82544_readreg: [mii interface function]
5630 *
5631 * Read a PHY register on the GMII.
5632 */
5633 static int
5634 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5635 {
5636 struct wm_softc *sc = device_private(self);
5637 uint32_t mdic = 0;
5638 int i, rv;
5639
5640 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5641 MDIC_REGADD(reg));
5642
5643 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5644 mdic = CSR_READ(sc, WMREG_MDIC);
5645 if (mdic & MDIC_READY)
5646 break;
5647 delay(50);
5648 }
5649
5650 if ((mdic & MDIC_READY) == 0) {
5651 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5652 device_xname(sc->sc_dev), phy, reg);
5653 rv = 0;
5654 } else if (mdic & MDIC_E) {
5655 #if 0 /* This is normal if no PHY is present. */
5656 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5657 device_xname(sc->sc_dev), phy, reg);
5658 #endif
5659 rv = 0;
5660 } else {
5661 rv = MDIC_DATA(mdic);
5662 if (rv == 0xffff)
5663 rv = 0;
5664 }
5665
5666 return rv;
5667 }
5668
5669 /*
5670 * wm_gmii_i82544_writereg: [mii interface function]
5671 *
5672 * Write a PHY register on the GMII.
5673 */
5674 static void
5675 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5676 {
5677 struct wm_softc *sc = device_private(self);
5678 uint32_t mdic = 0;
5679 int i;
5680
5681 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5682 MDIC_REGADD(reg) | MDIC_DATA(val));
5683
5684 for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
5685 mdic = CSR_READ(sc, WMREG_MDIC);
5686 if (mdic & MDIC_READY)
5687 break;
5688 delay(50);
5689 }
5690
5691 if ((mdic & MDIC_READY) == 0)
5692 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5693 device_xname(sc->sc_dev), phy, reg);
5694 else if (mdic & MDIC_E)
5695 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5696 device_xname(sc->sc_dev), phy, reg);
5697 }
5698
5699 /*
5700 * wm_gmii_i80003_readreg: [mii interface function]
5701 *
5702  *	Read a PHY register on the Kumeran bus.
5703  *	This could be handled by the PHY layer if we didn't have to lock
5704  *	the resource ...
5705 */
5706 static int
5707 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5708 {
5709 struct wm_softc *sc = device_private(self);
5710 int sem;
5711 int rv;
5712
5713 if (phy != 1) /* only one PHY on kumeran bus */
5714 return 0;
5715
5716 sem = swfwphysem[sc->sc_funcid];
5717 if (wm_get_swfw_semaphore(sc, sem)) {
5718 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5719 __func__);
5720 return 0;
5721 }
5722
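	/*
	 * The reg argument carries a GG82563 page number in its high
	 * bits; select that page first.  In-page offsets at or above
	 * GG82563_MIN_ALT_REG have to go through the alternate
	 * page-select register.
	 */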
5723 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5724 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5725 reg >> GG82563_PAGE_SHIFT);
5726 } else {
5727 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5728 reg >> GG82563_PAGE_SHIFT);
5729 }
	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
5731 delay(200);
5732 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5733 delay(200);
5734
5735 wm_put_swfw_semaphore(sc, sem);
5736 return rv;
5737 }
5738
5739 /*
5740 * wm_gmii_i80003_writereg: [mii interface function]
5741 *
 * Write a PHY register on the Kumeran bus (i80003/GG82563).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5745 */
5746 static void
5747 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5748 {
5749 struct wm_softc *sc = device_private(self);
5750 int sem;
5751
5752 if (phy != 1) /* only one PHY on kumeran bus */
5753 return;
5754
5755 sem = swfwphysem[sc->sc_funcid];
5756 if (wm_get_swfw_semaphore(sc, sem)) {
5757 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5758 __func__);
5759 return;
5760 }
5761
5762 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5763 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5764 reg >> GG82563_PAGE_SHIFT);
5765 } else {
5766 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5767 reg >> GG82563_PAGE_SHIFT);
5768 }
	/* Wait an extra 200us to work around a ready-bit bug in the MDIC register */
5770 delay(200);
5771 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5772 delay(200);
5773
5774 wm_put_swfw_semaphore(sc, sem);
5775 }
5776
5777 /*
5778 * wm_gmii_bm_readreg: [mii interface function]
5779 *
 * Read a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5783 */
5784 static int
5785 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5786 {
5787 struct wm_softc *sc = device_private(self);
5788 int sem;
5789 int rv;
5790
5791 sem = swfwphysem[sc->sc_funcid];
5792 if (wm_get_swfw_semaphore(sc, sem)) {
5793 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5794 __func__);
5795 return 0;
5796 }
5797
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5807
5808 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5809 wm_put_swfw_semaphore(sc, sem);
5810 return rv;
5811 }
5812
5813 /*
5814 * wm_gmii_bm_writereg: [mii interface function]
5815 *
 * Write a PHY register on the BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5819 */
5820 static void
5821 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5822 {
5823 struct wm_softc *sc = device_private(self);
5824 int sem;
5825
5826 sem = swfwphysem[sc->sc_funcid];
5827 if (wm_get_swfw_semaphore(sc, sem)) {
5828 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5829 __func__);
5830 return;
5831 }
5832
	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
5842
5843 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5844 wm_put_swfw_semaphore(sc, sem);
5845 }
5846
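/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  The access
 *	is indirect: enable wakeup-register access on page 769, write
 *	the register number to the address opcode register, move the
 *	data through the data opcode register, then restore the page
 *	769 enable bits.
 */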
5847 static void
5848 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
5849 {
5850 struct wm_softc *sc = device_private(self);
5851 uint16_t regnum = BM_PHY_REG_NUM(offset);
5852 uint16_t wuce;
5853
5854 /* XXX Gig must be disabled for MDIO accesses to page 800 */
5855 if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
5857 }
5858
5859 /* Set page 769 */
5860 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5861 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5862
5863 wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
5864
5865 wuce &= ~BM_WUC_HOST_WU_BIT;
5866 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
5867 wuce | BM_WUC_ENABLE_BIT);
5868
5869 /* Select page 800 */
5870 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5871 BM_WUC_PAGE << BME1000_PAGE_SHIFT);
5872
5873 /* Write page 800 */
5874 wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
5875
5876 if (rd)
5877 *val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
5878 else
5879 wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
5880
5881 /* Set page 769 */
5882 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5883 BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
5884
5885 wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
5886 }
5887
5888 /*
5889 * wm_gmii_hv_readreg: [mii interface function]
5890 *
 * Read a PHY register on the HV PHY (82577/82578 on PCH).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5894 */
5895 static int
5896 wm_gmii_hv_readreg(device_t self, int phy, int reg)
5897 {
5898 struct wm_softc *sc = device_private(self);
5899 uint16_t page = BM_PHY_REG_PAGE(reg);
5900 uint16_t regnum = BM_PHY_REG_NUM(reg);
5901 uint16_t val;
5902 int rv;
5903
5904 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5905 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5906 __func__);
5907 return 0;
5908 }
5909
5910 /* XXX Workaround failure in MDIO access while cable is disconnected */
5911 if (sc->sc_phytype == WMPHY_82577) {
5912 /* XXX must write */
5913 }
5914
	/* Page 800 works differently from the rest, so it has its own function */
5916 if (page == BM_WUC_PAGE) {
5917 wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
5918 return val;
5919 }
5920
	/*
	 * Pages below 768 work differently from the rest, so they
	 * would need their own function (not handled yet).
	 */
5925 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5926 printf("gmii_hv_readreg!!!\n");
5927 return 0;
5928 }
5929
5930 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5931 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5932 page << BME1000_PAGE_SHIFT);
5933 }
5934
5935 rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
5936 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5937 return rv;
5938 }
5939
5940 /*
5941 * wm_gmii_hv_writereg: [mii interface function]
5942 *
 * Write a PHY register on the HV PHY (82577/82578 on PCH).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
5946 */
5947 static void
5948 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
5949 {
5950 struct wm_softc *sc = device_private(self);
5951 uint16_t page = BM_PHY_REG_PAGE(reg);
5952 uint16_t regnum = BM_PHY_REG_NUM(reg);
5953
5954 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5955 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5956 __func__);
5957 return;
5958 }
5959
5960 /* XXX Workaround failure in MDIO access while cable is disconnected */
5961
	/* Page 800 works differently from the rest, so it has its own function */
5963 if (page == BM_WUC_PAGE) {
5964 uint16_t tmp;
5965
5966 tmp = val;
5967 wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
5968 return;
5969 }
5970
	/*
	 * Pages below 768 work differently from the rest, so they
	 * would need their own function (not handled yet).
	 */
5975 if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
5976 printf("gmii_hv_writereg!!!\n");
5977 return;
5978 }
5979
5980 /*
5981 * XXX Workaround MDIO accesses being disabled after entering IEEE
5982 * Power Down (whenever bit 11 of the PHY control register is set)
5983 */
5984
5985 if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
5986 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5987 page << BME1000_PAGE_SHIFT);
5988 }
5989
5990 wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
5991 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5992 }
5993
5994 /*
 * wm_sgmii_readreg:	[mii interface function]
 *
 * Read a PHY register on the SGMII, via the I2CCMD register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6000 */
6001 static int
6002 wm_sgmii_readreg(device_t self, int phy, int reg)
6003 {
6004 struct wm_softc *sc = device_private(self);
6005 uint32_t i2ccmd;
6006 int i, rv;
6007
6008 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6009 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6010 __func__);
6011 return 0;
6012 }
6013
6014 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6015 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6016 | I2CCMD_OPCODE_READ;
6017 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6018
6019 /* Poll the ready bit */
6020 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6021 delay(50);
6022 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6023 if (i2ccmd & I2CCMD_READY)
6024 break;
6025 }
6026 if ((i2ccmd & I2CCMD_READY) == 0)
6027 aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
6028 if ((i2ccmd & I2CCMD_ERROR) != 0)
6029 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6030
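	/* The data field of I2CCMD comes back byte-swapped; swap it back */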
6031 rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
6032
6033 wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6034 return rv;
6035 }
6036
6037 /*
 * wm_sgmii_writereg:	[mii interface function]
 *
 * Write a PHY register on the SGMII, via the I2CCMD register.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
6043 */
6044 static void
6045 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
6046 {
6047 struct wm_softc *sc = device_private(self);
6048 uint32_t i2ccmd;
6049 int i;
6050
6051 if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
6052 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6053 __func__);
6054 return;
6055 }
6056
6057 i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
6058 | (phy << I2CCMD_PHY_ADDR_SHIFT)
6059 | I2CCMD_OPCODE_WRITE;
6060 CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
6061
6062 /* Poll the ready bit */
6063 for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
6064 delay(50);
6065 i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
6066 if (i2ccmd & I2CCMD_READY)
6067 break;
6068 }
6069 if ((i2ccmd & I2CCMD_READY) == 0)
6070 aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
6071 if ((i2ccmd & I2CCMD_ERROR) != 0)
6072 aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
6073
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6075 }
6076
6077 /*
6078 * wm_gmii_statchg: [mii interface function]
6079 *
6080 * Callback from MII layer when media changes.
6081 */
6082 static void
6083 wm_gmii_statchg(device_t self)
6084 {
6085 struct wm_softc *sc = device_private(self);
6086 struct mii_data *mii = &sc->sc_mii;
6087
6088 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6089 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6090 sc->sc_fcrtl &= ~FCRTL_XONE;
6091
6092 /*
6093 * Get flow control negotiation result.
6094 */
6095 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
6096 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
6097 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
6098 mii->mii_media_active &= ~IFM_ETH_FMASK;
6099 }
6100
6101 if (sc->sc_flowflags & IFM_FLOW) {
6102 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
6103 sc->sc_ctrl |= CTRL_TFCE;
6104 sc->sc_fcrtl |= FCRTL_XONE;
6105 }
6106 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
6107 sc->sc_ctrl |= CTRL_RFCE;
6108 }
6109
6110 if (sc->sc_mii.mii_media_active & IFM_FDX) {
6111 DPRINTF(WM_DEBUG_LINK,
6112 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
6113 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6114 } else {
6115 DPRINTF(WM_DEBUG_LINK,
6116 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
6117 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6118 }
6119
6120 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6121 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6122 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
6123 : WMREG_FCRTL, sc->sc_fcrtl);
6124 if (sc->sc_type == WM_T_80003) {
6125 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
6126 case IFM_1000_T:
6127 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6128 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
6129 sc->sc_tipg = TIPG_1000T_80003_DFLT;
6130 break;
6131 default:
6132 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
6133 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
6134 sc->sc_tipg = TIPG_10_100_80003_DFLT;
6135 break;
6136 }
6137 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
6138 }
6139 }
6140
6141 /*
6142 * wm_kmrn_readreg:
6143 *
 * Read a Kumeran register
6145 */
6146 static int
6147 wm_kmrn_readreg(struct wm_softc *sc, int reg)
6148 {
6149 int rv;
6150
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6152 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6153 aprint_error_dev(sc->sc_dev,
6154 "%s: failed to get semaphore\n", __func__);
6155 return 0;
6156 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6158 if (wm_get_swfwhw_semaphore(sc)) {
6159 aprint_error_dev(sc->sc_dev,
6160 "%s: failed to get semaphore\n", __func__);
6161 return 0;
6162 }
6163 }
6164
6165 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6166 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6167 KUMCTRLSTA_REN);
6168 delay(2);
6169
6170 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
6171
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6175 wm_put_swfwhw_semaphore(sc);
6176
6177 return rv;
6178 }
6179
6180 /*
6181 * wm_kmrn_writereg:
6182 *
 * Write a Kumeran register
6184 */
6185 static void
6186 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
6187 {
6188
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
6190 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
6191 aprint_error_dev(sc->sc_dev,
6192 "%s: failed to get semaphore\n", __func__);
6193 return;
6194 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
6196 if (wm_get_swfwhw_semaphore(sc)) {
6197 aprint_error_dev(sc->sc_dev,
6198 "%s: failed to get semaphore\n", __func__);
6199 return;
6200 }
6201 }
6202
6203 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
6204 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
6205 (val & KUMCTRLSTA_MASK));
6206
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
6210 wm_put_swfwhw_semaphore(sc);
6211 }
6212
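/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return 1 if the NVM is an on-board EEPROM, 0 if it is Flash.
 *	On the 82573/82574/82583 the part is Flash-backed when EECD
 *	bits 15 and 16 are both set.
 */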
6213 static int
6214 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
6215 {
6216 uint32_t eecd = 0;
6217
6218 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
6219 || sc->sc_type == WM_T_82583) {
6220 eecd = CSR_READ(sc, WMREG_EECD);
6221
6222 /* Isolate bits 15 & 16 */
6223 eecd = ((eecd >> 15) & 0x03);
6224
6225 /* If both bits are set, device is Flash type */
6226 if (eecd == 0x03)
6227 return 0;
6228 }
6229 return 1;
6230 }
6231
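/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the firmware semaphore in SWSM: set SWSM_SWESMBI and
 *	read it back; if the bit sticks we own the semaphore.  Returns
 *	0 on success, 1 on timeout.
 */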
6232 static int
6233 wm_get_swsm_semaphore(struct wm_softc *sc)
6234 {
6235 int32_t timeout;
6236 uint32_t swsm;
6237
6238 /* Get the FW semaphore. */
6239 timeout = 1000 + 1; /* XXX */
6240 while (timeout) {
6241 swsm = CSR_READ(sc, WMREG_SWSM);
6242 swsm |= SWSM_SWESMBI;
6243 CSR_WRITE(sc, WMREG_SWSM, swsm);
6244 /* if we managed to set the bit we got the semaphore. */
6245 swsm = CSR_READ(sc, WMREG_SWSM);
6246 if (swsm & SWSM_SWESMBI)
6247 break;
6248
6249 delay(50);
6250 timeout--;
6251 }
6252
6253 if (timeout == 0) {
6254 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
6255 /* Release semaphores */
6256 wm_put_swsm_semaphore(sc);
6257 return 1;
6258 }
6259 return 0;
6260 }
6261
6262 static void
6263 wm_put_swsm_semaphore(struct wm_softc *sc)
6264 {
6265 uint32_t swsm;
6266
6267 swsm = CSR_READ(sc, WMREG_SWSM);
6268 swsm &= ~(SWSM_SWESMBI);
6269 CSR_WRITE(sc, WMREG_SWSM, swsm);
6270 }
6271
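/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of a SW/FW sync bit: grab the SWSM
 *	semaphore if the chip has one, then set our bit in SW_FW_SYNC
 *	once neither the software nor the firmware bit for the resource
 *	is set.  Retries for roughly a second; returns 0 on success,
 *	1 on failure.
 */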
6272 static int
6273 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6274 {
6275 uint32_t swfw_sync;
6276 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
6277 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
6279
6280 for (timeout = 0; timeout < 200; timeout++) {
6281 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6282 if (wm_get_swsm_semaphore(sc)) {
6283 aprint_error_dev(sc->sc_dev,
6284 "%s: failed to get semaphore\n",
6285 __func__);
6286 return 1;
6287 }
6288 }
6289 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6290 if ((swfw_sync & (swmask | fwmask)) == 0) {
6291 swfw_sync |= swmask;
6292 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6293 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6294 wm_put_swsm_semaphore(sc);
6295 return 0;
6296 }
6297 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6298 wm_put_swsm_semaphore(sc);
6299 delay(5000);
6300 }
6301 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
6302 device_xname(sc->sc_dev), mask, swfw_sync);
6303 return 1;
6304 }
6305
6306 static void
6307 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
6308 {
6309 uint32_t swfw_sync;
6310
6311 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
6312 while (wm_get_swsm_semaphore(sc) != 0)
6313 continue;
6314 }
6315 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
6316 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
6317 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
6318 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
6319 wm_put_swsm_semaphore(sc);
6320 }
6321
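/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag in EXTCNFCTR (ICH/PCH-style
 *	arbitration with firmware and hardware): set the flag and read
 *	it back until it sticks.  Retries for roughly a second; returns
 *	0 on success, 1 on failure.
 */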
6322 static int
6323 wm_get_swfwhw_semaphore(struct wm_softc *sc)
6324 {
6325 uint32_t ext_ctrl;
	int timeout;
6327
6328 for (timeout = 0; timeout < 200; timeout++) {
6329 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6330 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
6331 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6332
6333 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6334 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
6335 return 0;
6336 delay(5000);
6337 }
6338 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
6339 device_xname(sc->sc_dev), ext_ctrl);
6340 return 1;
6341 }
6342
6343 static void
6344 wm_put_swfwhw_semaphore(struct wm_softc *sc)
6345 {
6346 uint32_t ext_ctrl;
6347 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
6348 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
6349 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
6350 }
6351
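/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Determine which of the two NVM flash banks is currently valid.
 *	Pre-ICH10 parts report it through EECD_SEC1VAL; on ICH10/PCH we
 *	look for the bank signature byte in the flash itself.
 */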
6352 static int
6353 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
6354 {
6355 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
6356 uint8_t bank_high_byte;
6357 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
6358
6359 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
6360 /* Value of bit 22 corresponds to the flash bank we're on. */
6361 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
6362 } else {
6363 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
6364 if ((bank_high_byte & 0xc0) == 0x80)
6365 *bank = 0;
6366 else {
6367 wm_read_ich8_byte(sc, act_offset + bank1_offset,
6368 &bank_high_byte);
6369 if ((bank_high_byte & 0xc0) == 0x80)
6370 *bank = 1;
6371 else {
6372 aprint_error_dev(sc->sc_dev,
6373 "EEPROM not present\n");
6374 return -1;
6375 }
6376 }
6377 }
6378
6379 return 0;
6380 }
6381
6382 /******************************************************************************
6383 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
6384 * register.
6385 *
6386 * sc - Struct containing variables accessed by shared code
6387 * offset - offset of word in the EEPROM to read
6388 * data - word read from the EEPROM
6389 * words - number of words to read
6390 *****************************************************************************/
6391 static int
6392 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
6393 {
6394 int32_t error = 0;
6395 uint32_t flash_bank = 0;
6396 uint32_t act_offset = 0;
6397 uint32_t bank_offset = 0;
6398 uint16_t word = 0;
6399 uint16_t i = 0;
6400
6401 /* We need to know which is the valid flash bank. In the event
6402 * that we didn't allocate eeprom_shadow_ram, we may not be
6403 * managing flash_bank. So it cannot be trusted and needs
6404 * to be updated with each read.
6405 */
6406 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
6407 if (error) {
6408 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
6409 __func__);
6410 return error;
6411 }
6412
	/* Adjust the offset if we're on bank 1; the bank size is in words, hence * 2 */
6414 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
6415
6416 error = wm_get_swfwhw_semaphore(sc);
6417 if (error) {
6418 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6419 __func__);
6420 return error;
6421 }
6422
6423 for (i = 0; i < words; i++) {
6424 /* The NVM part needs a byte offset, hence * 2 */
6425 act_offset = bank_offset + ((offset + i) * 2);
6426 error = wm_read_ich8_word(sc, act_offset, &word);
6427 if (error) {
6428 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
6429 __func__);
6430 break;
6431 }
6432 data[i] = word;
6433 }
6434
6435 wm_put_swfwhw_semaphore(sc);
6436 return error;
6437 }
6438
6439 /******************************************************************************
6440 * This function does initial flash setup so that a new read/write/erase cycle
6441 * can be started.
6442 *
6443 * sc - The pointer to the hw structure
6444 ****************************************************************************/
6445 static int32_t
6446 wm_ich8_cycle_init(struct wm_softc *sc)
6447 {
6448 uint16_t hsfsts;
6449 int32_t error = 1;
6450 int32_t i = 0;
6451
6452 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6453
	/* Check the Flash Descriptor Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
6458
	/* Clear FCERR and DAEL in Hw status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
6462
6463 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6464
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against before starting a new cycle, or the FDONE bit
	 * should be set to 1 by hardware reset so that it can be used
	 * to tell whether a cycle is in progress or has completed.  We
	 * should also have a software semaphore mechanism guarding
	 * FDONE or the cycle-in-progress bit, so that two threads'
	 * accesses to those bits are serialized and cannot start a
	 * cycle at the same time.
	 */
6475
6476 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6477 /*
6478 * There is no cycle running at present, so we can start a
6479 * cycle
6480 */
6481
6482 /* Begin by setting Flash Cycle Done. */
6483 hsfsts |= HSFSTS_DONE;
6484 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6485 error = 0;
6486 } else {
		/*
		 * Otherwise, poll for a while so the current cycle has
		 * a chance to end before giving up.
		 */
6491 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
6492 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6493 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
6494 error = 0;
6495 break;
6496 }
6497 delay(1);
6498 }
6499 if (error == 0) {
			/*
			 * The previous cycle ended within the timeout;
			 * now set the Flash Cycle Done bit.
			 */
6504 hsfsts |= HSFSTS_DONE;
6505 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
6506 }
6507 }
6508 return error;
6509 }
6510
6511 /******************************************************************************
6512 * This function starts a flash cycle and waits for its completion
6513 *
6514 * sc - The pointer to the hw structure
6515 ****************************************************************************/
6516 static int32_t
6517 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
6518 {
6519 uint16_t hsflctl;
6520 uint16_t hsfsts;
6521 int32_t error = 1;
6522 uint32_t i = 0;
6523
6524 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
6525 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6526 hsflctl |= HSFCTL_GO;
6527 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6528
6529 /* wait till FDONE bit is set to 1 */
6530 do {
6531 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6532 if (hsfsts & HSFSTS_DONE)
6533 break;
6534 delay(1);
6535 i++;
6536 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
6538 error = 0;
6539
6540 return error;
6541 }
6542
6543 /******************************************************************************
6544 * Reads a byte or word from the NVM using the ICH8 flash access registers.
6545 *
6546 * sc - The pointer to the hw structure
6547 * index - The index of the byte or word to read.
6548 * size - Size of data to read, 1=byte 2=word
6549 * data - Pointer to the word to store the value read.
6550 *****************************************************************************/
6551 static int32_t
6552 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
6553 uint32_t size, uint16_t* data)
6554 {
6555 uint16_t hsfsts;
6556 uint16_t hsflctl;
6557 uint32_t flash_linear_address;
6558 uint32_t flash_data = 0;
6559 int32_t error = 1;
6560 int32_t count = 0;
6561
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
6564 return error;
6565
6566 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
6567 sc->sc_ich8_flash_base;
6568
6569 do {
6570 delay(1);
6571 /* Steps */
6572 error = wm_ich8_cycle_init(sc);
6573 if (error)
6574 break;
6575
6576 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6577 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6578 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
6579 & HSFCTL_BCOUNT_MASK;
6580 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6581 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6582
6583 /*
6584 * Write the last 24 bits of index into Flash Linear address
6585 * field in Flash Address
6586 */
6587 /* TODO: TBD maybe check the index against the size of flash */
6588
6589 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6590
6591 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6592
		/*
		 * If FCERR is set, clear it and retry the whole
		 * sequence a few more times; otherwise read the value
		 * out of Flash Data0, least significant byte first.
		 */
6599 if (error == 0) {
6600 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6601 if (size == 1)
6602 *data = (uint8_t)(flash_data & 0x000000FF);
6603 else if (size == 2)
6604 *data = (uint16_t)(flash_data & 0x0000FFFF);
6605 break;
6606 } else {
6607 /*
6608 * If we've gotten here, then things are probably
6609 * completely hosed, but if the error condition is
6610 * detected, it won't hurt to give it another try...
6611 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
6612 */
6613 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6614 if (hsfsts & HSFSTS_ERR) {
6615 /* Repeat for some time before giving up. */
6616 continue;
6617 } else if ((hsfsts & HSFSTS_DONE) == 0)
6618 break;
6619 }
6620 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6621
6622 return error;
6623 }
6624
6625 /******************************************************************************
6626 * Reads a single byte from the NVM using the ICH8 flash access registers.
6627 *
6628 * sc - pointer to wm_hw structure
6629 * index - The index of the byte to read.
6630 * data - Pointer to a byte to store the value read.
6631 *****************************************************************************/
6632 static int32_t
6633 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6634 {
6635 int32_t status;
6636 uint16_t word = 0;
6637
6638 status = wm_read_ich8_data(sc, index, 1, &word);
6639 if (status == 0)
6640 *data = (uint8_t)word;
6641
6642 return status;
6643 }
6644
6645 /******************************************************************************
6646 * Reads a word from the NVM using the ICH8 flash access registers.
6647 *
6648 * sc - pointer to wm_hw structure
6649 * index - The starting byte index of the word to read.
6650 * data - Pointer to a word to store the value read.
6651 *****************************************************************************/
6652 static int32_t
6653 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6654 {
6655 int32_t status;
6656
6657 status = wm_read_ich8_data(sc, index, 2, data);
6658 return status;
6659 }
6660
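/*
 * wm_check_mng_mode:
 *
 *	Return nonzero if management firmware (e.g. IAMT) is enabled,
 *	using the chip-specific check.
 */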
6661 static int
6662 wm_check_mng_mode(struct wm_softc *sc)
6663 {
6664 int rv;
6665
6666 switch (sc->sc_type) {
6667 case WM_T_ICH8:
6668 case WM_T_ICH9:
6669 case WM_T_ICH10:
6670 case WM_T_PCH:
6671 rv = wm_check_mng_mode_ich8lan(sc);
6672 break;
6673 case WM_T_82574:
6674 case WM_T_82583:
6675 rv = wm_check_mng_mode_82574(sc);
6676 break;
6677 case WM_T_82571:
6678 case WM_T_82572:
6679 case WM_T_82573:
6680 case WM_T_80003:
6681 rv = wm_check_mng_mode_generic(sc);
6682 break;
6683 default:
		/* nothing to do */
6685 rv = 0;
6686 break;
6687 }
6688
6689 return rv;
6690 }
6691
6692 static int
6693 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6694 {
6695 uint32_t fwsm;
6696
6697 fwsm = CSR_READ(sc, WMREG_FWSM);
6698
6699 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6700 return 1;
6701
6702 return 0;
6703 }
6704
6705 static int
6706 wm_check_mng_mode_82574(struct wm_softc *sc)
6707 {
6708 uint16_t data;
6709
6710 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6711
6712 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6713 return 1;
6714
6715 return 0;
6716 }
6717
6718 static int
6719 wm_check_mng_mode_generic(struct wm_softc *sc)
6720 {
6721 uint32_t fwsm;
6722
6723 fwsm = CSR_READ(sc, WMREG_FWSM);
6724
6725 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6726 return 1;
6727
6728 return 0;
6729 }
6730
6731 static int
6732 wm_check_reset_block(struct wm_softc *sc)
6733 {
6734 uint32_t reg;
6735
6736 switch (sc->sc_type) {
6737 case WM_T_ICH8:
6738 case WM_T_ICH9:
6739 case WM_T_ICH10:
6740 case WM_T_PCH:
6741 reg = CSR_READ(sc, WMREG_FWSM);
6742 if ((reg & FWSM_RSPCIPHY) != 0)
6743 return 0;
6744 else
6745 return -1;
6746 break;
6747 case WM_T_82571:
6748 case WM_T_82572:
6749 case WM_T_82573:
6750 case WM_T_82574:
6751 case WM_T_82583:
6752 case WM_T_80003:
6753 reg = CSR_READ(sc, WMREG_MANC);
6754 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6755 return -1;
6756 else
6757 return 0;
6758 break;
6759 default:
6760 /* no problem */
6761 break;
6762 }
6763
6764 return 0;
6765 }
6766
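/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver is loaded: set SWSM_DRV_LOAD
 *	or CTRL_EXT_DRV_LOAD, depending on the chip type.
 */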
6767 static void
6768 wm_get_hw_control(struct wm_softc *sc)
6769 {
6770 uint32_t reg;
6771
6772 switch (sc->sc_type) {
6773 case WM_T_82573:
6774 #if 0
6775 case WM_T_82574:
6776 case WM_T_82583:
6777 /*
		 * FreeBSD's em driver has a function for the 82574 to
		 * check the management mode, but it's never used.  Why?
6780 */
6781 #endif
6782 reg = CSR_READ(sc, WMREG_SWSM);
6783 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6784 break;
6785 case WM_T_82571:
6786 case WM_T_82572:
6787 case WM_T_80003:
6788 case WM_T_ICH8:
6789 case WM_T_ICH9:
6790 case WM_T_ICH10:
6791 case WM_T_PCH:
6792 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6793 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6794 break;
6795 default:
6796 break;
6797 }
6798 }
6799
6800 /* XXX Currently TBI only */
6801 static int
6802 wm_check_for_link(struct wm_softc *sc)
6803 {
6804 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6805 uint32_t rxcw;
6806 uint32_t ctrl;
6807 uint32_t status;
6808 uint32_t sig;
6809
6810 rxcw = CSR_READ(sc, WMREG_RXCW);
6811 ctrl = CSR_READ(sc, WMREG_CTRL);
6812 status = CSR_READ(sc, WMREG_STATUS);
6813
6814 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6815
6816 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6817 device_xname(sc->sc_dev), __func__,
6818 ((ctrl & CTRL_SWDPIN(1)) == sig),
6819 ((status & STATUS_LU) != 0),
6820 ((rxcw & RXCW_C) != 0)
6821 ));
6822
6823 /*
6824 * SWDPIN LU RXCW
6825 * 0 0 0
6826 * 0 0 1 (should not happen)
6827 * 0 1 0 (should not happen)
6828 * 0 1 1 (should not happen)
6829 * 1 0 0 Disable autonego and force linkup
6830 * 1 0 1 got /C/ but not linkup yet
6831 * 1 1 0 (linkup)
6832 * 1 1 1 If IFM_AUTO, back to autonego
6833 *
6834 */
6835 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6836 && ((status & STATUS_LU) == 0)
6837 && ((rxcw & RXCW_C) == 0)) {
6838 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6839 __func__));
6840 sc->sc_tbi_linkup = 0;
6841 /* Disable auto-negotiation in the TXCW register */
6842 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6843
6844 /*
6845 * Force link-up and also force full-duplex.
6846 *
		 * NOTE: the hardware updates the TFCE and RFCE bits in
		 * CTRL automatically, so keep sc->sc_ctrl in sync.
6849 */
6850 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6851 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6852 } else if (((status & STATUS_LU) != 0)
6853 && ((rxcw & RXCW_C) != 0)
6854 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6855 sc->sc_tbi_linkup = 1;
6856 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6857 __func__));
6858 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6859 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6860 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6861 && ((rxcw & RXCW_C) != 0)) {
6862 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6863 } else {
6864 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6865 status));
6866 }
6867
6868 return 0;
6869 }
6870
6871 /* Work-around for 82566 Kumeran PCS lock loss */
6872 static void
6873 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
6874 {
6875 int miistatus, active, i;
6876 int reg;
6877
6878 miistatus = sc->sc_mii.mii_media_status;
6879
6880 /* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
6882 return;
6883
6884 active = sc->sc_mii.mii_media_active;
6885
6886 /* Nothing to do if the link is other than 1Gbps */
6887 if (IFM_SUBTYPE(active) != IFM_1000_T)
6888 return;
6889
6890 for (i = 0; i < 10; i++) {
6891 /* read twice */
6892 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
6893 reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
6895 goto out; /* GOOD! */
6896
6897 /* Reset the PHY */
6898 wm_gmii_reset(sc);
6899 delay(5*1000);
6900 }
6901
6902 /* Disable GigE link negotiation */
6903 reg = CSR_READ(sc, WMREG_PHY_CTRL);
6904 reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
6905 CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
6906
6907 /*
6908 * Call gig speed drop workaround on Gig disable before accessing
6909 * any PHY registers.
6910 */
6911 wm_gig_downshift_workaround_ich8lan(sc);
6912
6913 out:
6914 return;
6915 }
6916
6917 /* WOL from S5 stops working */
6918 static void
6919 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
6920 {
6921 uint16_t kmrn_reg;
6922
6923 /* Only for igp3 */
6924 if (sc->sc_phytype == WMPHY_IGP_3) {
6925 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
6926 kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
6927 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
6928 kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
6929 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
6930 }
6931 }
6932
6933 /*
 * Workaround for the PCH's PHYs
6935 * XXX should be moved to new PHY driver?
6936 */
6937 static void
6938 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
6939 {
6940
6941 /* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
6942
6943 /* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
6944
6945 /* 82578 */
6946 if (sc->sc_phytype == WMPHY_82578) {
6947 /* PCH rev. < 3 */
6948 if (sc->sc_rev < 3) {
6949 /* XXX 6 bit shift? Why? Is it page2? */
6950 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
6951 0x66c0);
6952 wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
6953 0xffff);
6954 }
6955
6956 /* XXX phy rev. < 2 */
6957 }
6958
6959 /* Select page 0 */
6960
6961 /* XXX acquire semaphore */
6962 wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
6963 /* XXX release semaphore */
6964
	/*
	 * Configure the K1 Si workaround during PHY reset, assuming
	 * there is link, so that K1 is disabled if the link runs at
	 * 1Gbps.
	 */
6969 wm_k1_gig_workaround_hv(sc, 1);
6970 }
6971
6972 static void
6973 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
6974 {
6975 int k1_enable = sc->sc_nvm_k1_enabled;
6976
6977 /* XXX acquire semaphore */
6978
6979 if (link) {
6980 k1_enable = 0;
6981
6982 /* Link stall fix for link up */
6983 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
6984 } else {
6985 /* Link stall fix for link down */
6986 wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
6987 }
6988
6989 wm_configure_k1_ich8lan(sc, k1_enable);
6990
6991 /* XXX release semaphore */
6992 }
6993
6994 static void
6995 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
6996 {
6997 uint32_t ctrl, ctrl_ext, tmp;
6998 uint16_t kmrn_reg;
6999
7000 kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
7001
7002 if (k1_enable)
7003 kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
7004 else
7005 kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
7006
7007 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
7008
7009 delay(20);
7010
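	/*
	 * Briefly force the speed with CTRL_FRCSPD and CTRL_EXT_SPD_BYPS
	 * (presumably to latch the new K1 setting), then restore the
	 * original CTRL and CTRL_EXT values.
	 */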
7011 ctrl = CSR_READ(sc, WMREG_CTRL);
7012 ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
7013
7014 tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
7015 tmp |= CTRL_FRCSPD;
7016
7017 CSR_WRITE(sc, WMREG_CTRL, tmp);
7018 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
7019 delay(20);
7020
7021 CSR_WRITE(sc, WMREG_CTRL, ctrl);
7022 CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
7023 delay(20);
7024 }
7025
7026 static void
7027 wm_set_pcie_completion_timeout(struct wm_softc *sc)
7028 {
7029 uint32_t gcr;
7030 pcireg_t ctrl2;
7031
7032 gcr = CSR_READ(sc, WMREG_GCR);
7033
	/* Only take action if the timeout is still at its default value of 0 */
7035 if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
7036 goto out;
7037
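	/*
	 * Parts that don't advertise PCIe capability version 2 take the
	 * timeout from GCR; version 2 parts program it through the
	 * Device Control 2 register in PCIe config space instead.
	 */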
7038 if ((gcr & GCR_CAP_VER2) == 0) {
7039 gcr |= GCR_CMPL_TMOUT_10MS;
7040 goto out;
7041 }
7042
7043 ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
7044 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2);
7045 ctrl2 |= WM_PCI_PCIE_DCSR2_16MS;
7046 pci_conf_write(sc->sc_pc, sc->sc_pcitag,
7047 sc->sc_pcixe_capoff + PCI_PCIE_DCSR2, ctrl2);
7048
7049 out:
7050 /* Disable completion timeout resend */
7051 gcr &= ~GCR_CMPL_TMOUT_RESEND;
7052
7053 CSR_WRITE(sc, WMREG_GCR, gcr);
7054 }
7055
/* Special case: the 82575 needs to do manual init after reset ... */
7057 static void
7058 wm_reset_init_script_82575(struct wm_softc *sc)
7059 {
	/*
	 * Remark: this is untested code - we have no board without an
	 * EEPROM.  Same setup as mentioned in the FreeBSD driver for
	 * the i82575.
	 */
7064
7065 /* SerDes configuration via SERDESCTRL */
7066 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
7067 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
7068 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
7069 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
7070
7071 /* CCM configuration via CCMCTL register */
7072 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
7073 wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
7074
7075 /* PCIe lanes configuration */
7076 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
7077 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
7078 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
7079 wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
7080
7081 /* PCIe PLL Configuration */
7082 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
7083 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
7084 wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
7085 }
7086