if_wm.c revision 1.191 1 /* $NetBSD: if_wm.c,v 1.191 2010/01/12 22:26:30 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Rework how parameters are loaded from the EEPROM.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.191 2010/01/12 22:26:30 msaitoh Exp $");
80
81 #include "bpfilter.h"
82 #include "rnd.h"
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96
97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
98
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <netinet/in.h> /* XXX for struct ip */
113 #include <netinet/in_systm.h> /* XXX for struct ip */
114 #include <netinet/ip.h> /* XXX for struct ip */
115 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h> /* XXX for struct tcphdr */
117
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126 #include <dev/mii/igphyreg.h>
127
128 #include <dev/pci/pcireg.h>
129 #include <dev/pci/pcivar.h>
130 #include <dev/pci/pcidevs.h>
131
132 #include <dev/pci/if_wmreg.h>
133 #include <dev/pci/if_wmvar.h>
134
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

/*
 * Conditional debug printf.  Wrapped in do/while(0) so that DPRINTF()
 * expands to a single statement and is safe inside an unbraced
 * if/else -- the bare "if (wm_debug & (x)) printf y" form would
 * otherwise capture a following "else".
 */
#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	do { } while (0)	/* nothing */
#endif /* WM_DEBUG */
146
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* advertised if_snd queue length */
#define	WM_TXQUEUELEN_MAX	64	/* Tx jobs managed at once */
#define	WM_TXQUEUELEN_MAX_82547	16	/* ditto, i82547 (Tx FIFO errata) */
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
/* Mask form works because sc_txnum is a power of two. */
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256	/* ring size on < 82544 (errata) */
#define	WM_NTXDESC_82544	4096	/* ring size on >= 82544 */
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
/* Mask form works because sc_ntxdesc is a power of two. */
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256	/* power of two, for the masks below */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
185
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

/* Smaller layout for chips limited to WM_NTXDESC_82542 Tx descriptors. */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

/* Byte offset of a member within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])	/* Tx descriptor (x) */
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])	/* Rx descriptor (x) */
212
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/* NOTE(review): units depend on the poll loop of the user -- confirm. */
#define	WM_LINKUP_TIMEOUT	50
235
/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* our PCI device tag */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Rx payload alignment offset; see WM_INIT_RXDESC */
	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* nonzero while discarding a frame --
					 * TODO confirm against wm_rxintr */
	int sc_rxlen;			/* length of Rx chain; cleared by
					 * WM_RXCHAIN_RESET */
	struct mbuf *sc_rxhead;		/* first mbuf of Rx chain being built */
	struct mbuf *sc_rxtail;		/* last mbuf of that chain */
	struct mbuf **sc_rxtailp;	/* &sc_rxtail->m_next; see
					 * WM_RXCHAIN_LINK */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	/* ICH8 flash geometry -- units not visible here; see
	 * wm_read_eeprom_ich8 users.  TODO confirm. */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};
370
/* Reset the software Rx mbuf chain to empty. */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/* Append mbuf (m) to the software Rx chain being assembled. */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

/* Memory-mapped CSR access. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
/* Flush posted writes by reading the STATUS register. */
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

/* ICH8 flash window access. */
#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

/* Bus (DMA) address of Tx/Rx descriptor (x) in the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/* Low/high 32-bit halves of those addresses (high is 0 on 32-bit buses). */
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
421
/*
 * Sync (n) Tx descriptors starting at index (x).  A range that wraps
 * past the end of the ring is synced in two pieces: tail first, then
 * the remainder from index 0.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync the single Rx descriptor at index (x). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

/*
 * (Re)initialize Rx descriptor (x) with its software buffer, sync it,
 * and hand it to the chip by advancing the RDT register.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
482
/* ifnet entry points */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

/* Reset, Rx buffer management, EEPROM, periodic tick */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

/* Multicast/receive filter */
static void	wm_set_filter(struct wm_softc *);

/* Interrupt handling */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X) media */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (copper PHY) access, per chip generation */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_kv_readreg(device_t, int, int);
static void	wm_gmii_kv_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Kumeran (KMRN) register access */
static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

/* Autoconfiguration, semaphores, ICH8 flash, management mode */
static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

/* autoconf(9) glue: match/attach, no detach/activate */
CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
568
/*
 * Devices supported by this driver.  Terminated by an all-zero entry;
 * matched on (wmp_vendor, wmp_product).
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device description */
	wm_chip_type		wmp_type;	/* chip generation */
	int			wmp_flags;	/* media flags, below */
#define	WMP_F_1000X		0x01		/* 1000BASE-X (fiber) */
#define	WMP_F_1000T		0x02		/* 1000BASE-T (copper) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	/*
	 * NOTE(review): fiber part, but flagged WMP_F_1000T while the
	 * other *_FIBER entries use WMP_F_1000X -- confirm intended.
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	/* table terminator */
	{ 0,			0,
	  NULL,
	  0,			0 },
};
885
#ifdef WM_EVENT_COUNTERS
/*
 * Storage for the per-segment Tx event counter names ("txseg0",
 * "txseg1", ...); the strings are generated at attach time.
 */
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */
889
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register through the I/O-mapped indirect access
 *	window: the register offset is written to the address window at
 *	I/O offset 0, then the value is read back from the data window
 *	at I/O offset 4.
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
899
/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window.  The register offset must be written to the address
 *	window (I/O offset 0) before the value is written to the data
 *	window (I/O offset 4); the order of the two writes matters.
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
907
908 static inline void
909 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
910 {
911 wa->wa_low = htole32(v & 0xffffffffU);
912 if (sizeof(bus_addr_t) == 8)
913 wa->wa_high = htole32((uint64_t) v >> 32);
914 else
915 wa->wa_high = 0;
916 }
917
918 static void
919 wm_set_spiaddrsize(struct wm_softc *sc)
920 {
921 uint32_t reg;
922
923 sc->sc_flags |= WM_F_EEPROM_SPI;
924 reg = CSR_READ(sc, WMREG_EECD);
925 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
926 }
927
928 static const struct wm_product *
929 wm_lookup(const struct pci_attach_args *pa)
930 {
931 const struct wm_product *wmp;
932
933 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
934 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
935 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
936 return (wmp);
937 }
938 return (NULL);
939 }
940
941 static int
942 wm_match(device_t parent, cfdata_t cf, void *aux)
943 {
944 struct pci_attach_args *pa = aux;
945
946 if (wm_lookup(pa) != NULL)
947 return (1);
948
949 return (0);
950 }
951
/*
 * wm_attach:
 *
 *	Autoconfiguration attach routine.  Maps the device registers
 *	(and, on i82544 and later, the I/O-mapped indirect access
 *	window), establishes the interrupt, determines the bus type and
 *	speed, allocates the Tx/Rx control data and DMA maps, reads the
 *	station address and configuration from the EEPROM/FLASH, sets
 *	up TBI or GMII media, and attaches the network interface and
 *	event counters.
 */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	/* wm_match() already succeeded, so a lookup failure here is a bug. */
	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Prefer the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as fast
		 * as a 32-bit 66MHz PCI Bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		/* ICH/PCH parts use FLASH+SWFWHW sync instead (set below). */
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL,
		    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
		    (void **)&sc->sc_control_data,
		    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
		    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, cdata_size, NULL,
		    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * On parts with manageability, take control of the hardware
	 * from the firmware when management mode is active.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		/* GFPREG encodes the flash base/limit in sector units. */
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		/* Convert the bank size from bytes to 16-bit words. */
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read twice again because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		/* The EEPROM stores the address as three LE 16-bit words. */
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 * Device properties, when present, override the EEPROM.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
	    || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc, wmp->wmp_product);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc, wmp->wmp_product);
	}

	/* Fill in the interface and hand our methods to the network stack. */
	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_PCH:
		/* XXX limited to 4096 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
1792
1793 /*
1794 * wm_tx_offload:
1795 *
1796 * Set up TCP/IP checksumming parameters for the
1797 * specified packet.
1798 */
1799 static int
1800 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1801 uint8_t *fieldsp)
1802 {
1803 struct mbuf *m0 = txs->txs_mbuf;
1804 struct livengood_tcpip_ctxdesc *t;
1805 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1806 uint32_t ipcse;
1807 struct ether_header *eh;
1808 int offset, iphl;
1809 uint8_t fields;
1810
1811 /*
1812 * XXX It would be nice if the mbuf pkthdr had offset
1813 * fields for the protocol headers.
1814 */
1815
1816 eh = mtod(m0, struct ether_header *);
1817 switch (htons(eh->ether_type)) {
1818 case ETHERTYPE_IP:
1819 case ETHERTYPE_IPV6:
1820 offset = ETHER_HDR_LEN;
1821 break;
1822
1823 case ETHERTYPE_VLAN:
1824 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1825 break;
1826
1827 default:
1828 /*
1829 * Don't support this protocol or encapsulation.
1830 */
1831 *fieldsp = 0;
1832 *cmdp = 0;
1833 return (0);
1834 }
1835
1836 if ((m0->m_pkthdr.csum_flags &
1837 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1838 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1839 } else {
1840 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1841 }
1842 ipcse = offset + iphl - 1;
1843
1844 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1845 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1846 seg = 0;
1847 fields = 0;
1848
1849 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1850 int hlen = offset + iphl;
1851 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1852
1853 if (__predict_false(m0->m_len <
1854 (hlen + sizeof(struct tcphdr)))) {
1855 /*
1856 * TCP/IP headers are not in the first mbuf; we need
1857 * to do this the slow and painful way. Let's just
1858 * hope this doesn't happen very often.
1859 */
1860 struct tcphdr th;
1861
1862 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1863
1864 m_copydata(m0, hlen, sizeof(th), &th);
1865 if (v4) {
1866 struct ip ip;
1867
1868 m_copydata(m0, offset, sizeof(ip), &ip);
1869 ip.ip_len = 0;
1870 m_copyback(m0,
1871 offset + offsetof(struct ip, ip_len),
1872 sizeof(ip.ip_len), &ip.ip_len);
1873 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1874 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1875 } else {
1876 struct ip6_hdr ip6;
1877
1878 m_copydata(m0, offset, sizeof(ip6), &ip6);
1879 ip6.ip6_plen = 0;
1880 m_copyback(m0,
1881 offset + offsetof(struct ip6_hdr, ip6_plen),
1882 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1883 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1884 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1885 }
1886 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1887 sizeof(th.th_sum), &th.th_sum);
1888
1889 hlen += th.th_off << 2;
1890 } else {
1891 /*
1892 * TCP/IP headers are in the first mbuf; we can do
1893 * this the easy way.
1894 */
1895 struct tcphdr *th;
1896
1897 if (v4) {
1898 struct ip *ip =
1899 (void *)(mtod(m0, char *) + offset);
1900 th = (void *)(mtod(m0, char *) + hlen);
1901
1902 ip->ip_len = 0;
1903 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1904 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1905 } else {
1906 struct ip6_hdr *ip6 =
1907 (void *)(mtod(m0, char *) + offset);
1908 th = (void *)(mtod(m0, char *) + hlen);
1909
1910 ip6->ip6_plen = 0;
1911 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1912 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1913 }
1914 hlen += th->th_off << 2;
1915 }
1916
1917 if (v4) {
1918 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1919 cmdlen |= WTX_TCPIP_CMD_IP;
1920 } else {
1921 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1922 ipcse = 0;
1923 }
1924 cmd |= WTX_TCPIP_CMD_TSE;
1925 cmdlen |= WTX_TCPIP_CMD_TSE |
1926 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1927 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1928 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1929 }
1930
1931 /*
1932 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1933 * offload feature, if we load the context descriptor, we
1934 * MUST provide valid values for IPCSS and TUCSS fields.
1935 */
1936
1937 ipcs = WTX_TCPIP_IPCSS(offset) |
1938 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1939 WTX_TCPIP_IPCSE(ipcse);
1940 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1941 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1942 fields |= WTX_IXSM;
1943 }
1944
1945 offset += iphl;
1946
1947 if (m0->m_pkthdr.csum_flags &
1948 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1949 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1950 fields |= WTX_TXSM;
1951 tucs = WTX_TCPIP_TUCSS(offset) |
1952 WTX_TCPIP_TUCSO(offset +
1953 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1954 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1955 } else if ((m0->m_pkthdr.csum_flags &
1956 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1957 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1958 fields |= WTX_TXSM;
1959 tucs = WTX_TCPIP_TUCSS(offset) |
1960 WTX_TCPIP_TUCSO(offset +
1961 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1962 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1963 } else {
1964 /* Just initialize it to a valid TCP context. */
1965 tucs = WTX_TCPIP_TUCSS(offset) |
1966 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1967 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1968 }
1969
1970 /* Fill in the context descriptor. */
1971 t = (struct livengood_tcpip_ctxdesc *)
1972 &sc->sc_txdescs[sc->sc_txnext];
1973 t->tcpip_ipcs = htole32(ipcs);
1974 t->tcpip_tucs = htole32(tucs);
1975 t->tcpip_cmdlen = htole32(cmdlen);
1976 t->tcpip_seg = htole32(seg);
1977 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1978
1979 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1980 txs->txs_ndesc++;
1981
1982 *cmdp = cmd;
1983 *fieldsp = fields;
1984
1985 return (0);
1986 }
1987
1988 static void
1989 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1990 {
1991 struct mbuf *m;
1992 int i;
1993
1994 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1995 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1996 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1997 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1998 m->m_data, m->m_len, m->m_flags);
1999 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2000 i, i == 1 ? "" : "s");
2001 }
2002
2003 /*
2004 * wm_82547_txfifo_stall:
2005 *
2006 * Callout used to wait for the 82547 Tx FIFO to drain,
2007 * reset the FIFO pointers, and restart packet transmission.
2008 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	/* Block network interrupts while we examine/modify Tx state. */
	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * The FIFO is considered drained only when the descriptor
		 * ring has caught up (TDT == TDH) and the internal FIFO
		 * head/tail pointers — both the live and the saved copies —
		 * agree with each other.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Reset our software FIFO model and resume Tx. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2049
2050 /*
2051 * wm_82547_txfifo_bugchk:
2052 *
2053 * Check for bug condition in the 82547 Tx FIFO. We need to
2054 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
2056 *
2057 * We do this by checking the amount of space before the end
2058 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2059 * the Tx FIFO, wait for all remaining packets to drain, reset
2060 * the internal FIFO pointers to the beginning, and restart
2061 * transmission on the interface.
2062 */
2063 #define WM_FIFO_HDR 0x10
2064 #define WM_82547_PAD_LEN 0x3e0
2065 static int
2066 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2067 {
2068 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2069 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2070
2071 /* Just return if already stalled. */
2072 if (sc->sc_txfifo_stall)
2073 return (1);
2074
2075 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2076 /* Stall only occurs in half-duplex mode. */
2077 goto send_packet;
2078 }
2079
2080 if (len >= WM_82547_PAD_LEN + space) {
2081 sc->sc_txfifo_stall = 1;
2082 callout_schedule(&sc->sc_txfifo_ch, 1);
2083 return (1);
2084 }
2085
2086 send_packet:
2087 sc->sc_txfifo_head += len;
2088 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2089 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2090
2091 return (0);
2092 }
2093
2094 /*
2095 * wm_start: [ifnet interface function]
2096 *
2097 * Start packet transmission on the interface.
2098 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Do nothing unless running and not flow-blocked (OACTIVE). */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			/* Low on jobs: reap completed transmissions first. */
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/* Is this a TCP segmentation-offload packet? */
		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				/* Too many segments: drop permanently. */
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Delay interrupt; always insert the FCS. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				     "len 0x%04x\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2403
2404 /*
2405 * wm_watchdog: [ifnet interface function]
2406 *
2407 * Watchdog timer handler.
2408 */
2409 static void
2410 wm_watchdog(struct ifnet *ifp)
2411 {
2412 struct wm_softc *sc = ifp->if_softc;
2413
2414 /*
2415 * Since we're using delayed interrupts, sweep up
2416 * before we report an error.
2417 */
2418 wm_txintr(sc);
2419
2420 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2421 log(LOG_ERR,
2422 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2423 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2424 sc->sc_txnext);
2425 ifp->if_oerrors++;
2426
2427 /* Reset the interface. */
2428 (void) wm_init(ifp);
2429 }
2430
2431 /* Try to get more packets going. */
2432 wm_start(ifp);
2433 }
2434
2435 /*
2436 * wm_ioctl: [ifnet interface function]
2437 *
2438 * Handle control requests from the operator.
2439 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct sockaddr_dl *sdl;
	int diff, s, error;

	/* Serialize against the interrupt side. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			/* Which interesting flags changed since last time? */
			diff = (ifp->if_flags ^ sc->sc_if_flags)
			    & (IFF_PROMISC | IFF_ALLMULTI);
			if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
				/*
				 * If the difference between last flag and
				 * new flag is only IFF_PROMISC or
				 * IFF_ALLMULTI, set multicast filter only
				 * (don't reset to prevent link down).
				 */
				wm_set_filter(sc);
			} else {
				/*
				 * Reset the interface to pick up changes in
				 * any other flags that affect the hardware
				 * state.
				 */
				wm_init(ifp);
			}
		} else {
			/* Interface going down: stop it if it was running. */
			if (ifp->if_flags & IFF_RUNNING)
				wm_stop(ifp, 1);
		}
		/* Remember flags so we can compute the diff next time. */
		sc->sc_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	case SIOCINITIFADDR:
		if (ifa->ifa_addr->sa_family == AF_LINK) {
			/* Copy the new link-level address into if_dl. */
			sdl = satosdl(ifp->if_dl->ifa_addr);
			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
			    LLADDR(satosdl(ifa->ifa_addr)),
			    ifp->if_addrlen);
			/* unicast address is first multicast entry */
			wm_set_filter(sc);
			error = 0;
			break;
		}
		/* Fall through for rest */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		/* ENETRESET: we must reprogram hardware state ourselves. */
		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}
2535
2536 /*
2537 * wm_intr:
2538 *
2539 * Interrupt service routine.
2540 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/*
	 * Loop until the chip reports no more pending causes; reading
	 * ICR also acknowledges (clears) the reported interrupt causes.
	 */
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Always service the Rx ring, regardless of cause bits. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Likewise, always reap completed transmissions. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receive overrun: count it, optionally log it. */
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
2602
2603 /*
2604 * wm_txintr:
2605 *
2606 * Helper; handle transmit interrupts.
2607 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be about to free slots, so clear the flow-block flag. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		/* Pull the job's descriptors out of host cache/DMA view. */
		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; hand the descriptor back to DMA. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			/* Excess/late collision: count as output error. */
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Return descriptors, unmap the DMA and free the mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
2689
2690 /*
2691 * wm_rxintr:
2692 *
2693 * Helper; handle receive interrupts.
2694 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Scan the ring from where we left off until a not-done slot. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Snapshot descriptor fields before we recycle the slot. */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			/* Mid-packet discard: drop until we see EOP. */
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Account for this fragment in the accumulating packet. */
		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take ownership of its head. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
2897
2898 /*
2899 * wm_linkintr:
2900 *
2901 * Helper; handle link interrupts.
2902 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));
	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    device_xname(sc->sc_dev)));
			mii_tick(&sc->sc_mii);
			if (sc->sc_type == WM_T_82543) {
				int miistatus, active;

				/*
				 * With 82543, we need to force speed and
				 * duplex on the MAC equal to what the PHY
				 * speed and duplex configuration is.
				 */
				miistatus = sc->sc_mii.mii_media_status;

				if (miistatus & IFM_ACTIVE) {
					active = sc->sc_mii.mii_media_active;
					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
					    | CTRL_FD);
					switch (IFM_SUBTYPE(active)) {
					case IFM_10_T:
						sc->sc_ctrl |= CTRL_SPEED_10;
						break;
					case IFM_100_TX:
						sc->sc_ctrl |= CTRL_SPEED_100;
						break;
					case IFM_1000_T:
						sc->sc_ctrl |= CTRL_SPEED_1000;
						break;
					default:
						/*
						 * fiber?
						 * Should not enter here.
						 */
						printf("unknown media (%x)\n",
						    active);
						break;
					}
					if (active & IFM_FDX)
						sc->sc_ctrl |= CTRL_FD;
					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				}
			}
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    device_xname(sc->sc_dev)));
		}
		return;
	}

	/* TBI mode */
	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			/* Collision distance depends on the duplex mode. */
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		/* Reflect new link state on the LED. */
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		/* Receiving /C/ ordered sets: autonegotiation traffic. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
3011
3012 /*
3013 * wm_tick:
3014 *
3015 * One second timer, used to check link status, sweep up
3016 * completed transmit jobs, etc.
3017 */
static void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s;

	s = splnet();

	/* Flow-control statistics are only read on 82542 rev 2.1 and up. */
	if (sc->sc_type >= WM_T_82542_2_1) {
		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
	}

	/* Fold hardware statistics counters into the ifnet totals. */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Poll link state: via the PHY if we have one, else TBI. */
	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	/* Rearm ourselves to run again in one second. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}
3047
/*
 * wm_reset:
 *
 *	Reset the chip to a known state.
 */
static void
wm_reset(struct wm_softc *sc)
{
	int phy_reset = 0;	/* set when we also issue CTRL_PHY_RESET below */
	uint32_t reg, func, mask;
	int i;

	/*
	 * Allocate on-chip memory according to the MTU size.
	 * The Packet Buffer Allocation register must be written
	 * before the chip is reset.
	 */
	switch (sc->sc_type) {
	case WM_T_82547:
	case WM_T_82547_2:
		/* These chips also need their Tx FIFO bookkeeping reset. */
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_22K : PBA_30K;
		sc->sc_txfifo_head = 0;
		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
		sc->sc_txfifo_size =
		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
		sc->sc_txfifo_stall = 0;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
		sc->sc_pba = PBA_32K;
		break;
	case WM_T_82573:
		sc->sc_pba = PBA_12K;
		break;
	case WM_T_82574:
	case WM_T_82583:
		sc->sc_pba = PBA_20K;
		break;
	case WM_T_ICH8:
		sc->sc_pba = PBA_8K;
		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
		break;
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		sc->sc_pba = PBA_10K;
		break;
	default:
		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
		    PBA_40K : PBA_48K;
		break;
	}
	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);

	/*
	 * On PCIe parts, disable GIO master access and wait for any
	 * in-flight DMA to drain before resetting the chip.
	 */
	if (sc->sc_flags & WM_F_PCIE) {
		int timeout = 800;

		sc->sc_ctrl |= CTRL_GIO_M_DIS;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		while (timeout--) {
			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
				break;
			delay(100);
		}
	}

	/* clear interrupt */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_RCTL, 0);
	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);

	/* set_tbi_sbp_82543() */

	delay(10*1000);

	/* Must acquire the MDIO ownership before MAC reset */
	switch(sc->sc_type) {
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* Retry until the hardware reflects our ownership bit. */
		i = 0;
		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
		do {
			CSR_WRITE(sc, WMREG_EXTCNFCTR,
			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
				break;
			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
			delay(2*1000);
			i++;
		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
		break;
	default:
		break;
	}

	/*
	 * 82541 Errata 29? & 82547 Errata 28?
	 * See also the description about PHY_RST bit in CTRL register
	 * in 8254x_GBe_SDM.pdf.
	 */
	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
		CSR_WRITE(sc, WMREG_CTRL,
		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
		delay(5000);
	}

	if (sc->sc_type == WM_T_PCH) {
		/* Save K1 */
	}

	/* Issue the reset via the mechanism this chip requires. */
	switch (sc->sc_type) {
	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		/*
		 * On some chipsets, a reset through a memory-mapped write
		 * cycle can cause the chip to reset before completing the
		 * write cycle.  This causes major headache that can be
		 * avoided by issuing the reset via indirect register writes
		 * through I/O space.
		 *
		 * So, if we successfully mapped the I/O BAR at attach time,
		 * use that.  Otherwise, try our luck with a memory-mapped
		 * reset.
		 */
		if (sc->sc_flags & WM_F_IOH_VALID)
			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
		else
			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
		break;
	case WM_T_82545_3:
	case WM_T_82546_3:
		/* Use the shadow control register on these chips. */
		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
		break;
	case WM_T_80003:
		/* Reset must be done while holding the PHY semaphore. */
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		wm_get_swfw_semaphore(sc, mask);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		wm_put_swfw_semaphore(sc, mask);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
		/* Also reset the PHY unless firmware blocks PHY resets. */
		if (wm_check_reset_block(sc) == 0) {
			if (sc->sc_type >= WM_T_PCH) {
				uint32_t status;

				/* Clear "PHY reset asserted" before reset. */
				status = CSR_READ(sc, WMREG_STATUS);
				CSR_WRITE(sc, WMREG_STATUS,
				    status & ~STATUS_PHYRA);
			}

			reg |= CTRL_PHY_RESET;
			phy_reset = 1;
		}
		wm_get_swfwhw_semaphore(sc);
		CSR_WRITE(sc, WMREG_CTRL, reg);
		delay(20*1000);
		wm_put_swfwhw_semaphore(sc);
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82546:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	default:
		/* Everything else can safely use the documented method. */
		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
		break;
	}

	/* If we reset the PHY too, wait for its configuration to finish. */
	if (phy_reset != 0)
		wm_get_cfg_done(sc);

	/* reload EEPROM */
	switch(sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Kick the EEPROM reload by hand via CTRL_EXT. */
		delay(10);
		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(2000);
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		delay(5*1000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		delay(20000);
		/* XXX Disable HW ARPs on ASF enabled adapters */
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
			delay(10);
			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		}
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		/*
		 * Phy configuration from NVM just starts after EECD_AUTO_RD
		 * is set.
		 */
		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
		    || (sc->sc_type == WM_T_82583))
			delay(25*1000);
		break;
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* check EECD_EE_AUTORD */
		wm_get_auto_rd_done(sc);
		break;
	case WM_T_ICH10:
	case WM_T_PCH:
		wm_lan_init_done(sc);
		break;
	default:
		panic("%s: unknown type\n", __func__);
	}

	/* reload sc_ctrl */
	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);

	/*
	 * For PCH, this write will make sure that any noise will be detected
	 * as a CRC error and be dropped rather than show up as a bad packet
	 * to the DMA engine
	 */
	if (sc->sc_type == WM_T_PCH)
		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);

#if 0
	/* Disabled wait for the reset bit to self-clear. */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
			return;
		}
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		log(LOG_ERR, "%s: reset failed to complete\n",
		    device_xname(sc->sc_dev));
#endif
}
3327
3328 /*
3329 * wm_init: [ifnet interface function]
3330 *
3331 * Initialize the interface. Must be called at splnet().
3332 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Take hardware control from firmware where management is present. */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Reset the PHY. */
	if (sc->sc_flags & WM_F_HAS_MII)
		wm_gmii_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Program the Tx ring registers (old layout on pre-82543 chips). */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* Old chips have two Rx queues; we only use queue 0. */
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	/* Populate the Rx ring, reusing mbufs that are already loaded. */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	/* Flow-control thresholds; old register layout on pre-82543. */
	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
		case WM_T_PCH:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* TBI (fiber) parts also want the /C/ ordered-set interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	/* Enable long-packet reception when running with a jumbo MTU. */
	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU))
		sc->sc_rctl |= RCTL_LPE;

	/* Tell the chip our receive buffer (mbuf cluster) size. */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return (error);
}
3707
3708 /*
3709 * wm_rxdrain:
3710 *
3711 * Drain the receive queue.
3712 */
3713 static void
3714 wm_rxdrain(struct wm_softc *sc)
3715 {
3716 struct wm_rxsoft *rxs;
3717 int i;
3718
3719 for (i = 0; i < WM_NRXDESC; i++) {
3720 rxs = &sc->sc_rxsoft[i];
3721 if (rxs->rxs_mbuf != NULL) {
3722 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3723 m_freem(rxs->rxs_mbuf);
3724 rxs->rxs_mbuf = NULL;
3725 }
3726 }
3727 }
3728
3729 /*
3730 * wm_stop: [ifnet interface function]
3731 *
3732 * Stop transmission on the interface.
3733 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Free the receive buffers too only when the caller asks for it. */
	if (disable)
		wm_rxdrain(sc);
}
3788
/*
 * wm_get_auto_rd_done:
 *
 *	Wait (up to ~10ms) for the EEPROM auto-read-done bit to be set
 *	after a reset, on the chip families that report it.
 */
void
wm_get_auto_rd_done(struct wm_softc *sc)
{
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Poll EECD once per millisecond, at most 10 times. */
		for (i = 0; i < 10; i++) {
			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
				break;
			delay(1000);
		}
		if (i == 10) {
			log(LOG_ERR, "%s: auto read from eeprom failed to "
			    "complete\n", device_xname(sc->sc_dev));
		}
		break;
	default:
		/* Other chips don't have the bit; nothing to wait for. */
		break;
	}
}
3818
/*
 * wm_lan_init_done:
 *
 *	Wait for STATUS_LAN_INIT_DONE after a reset (ICH10/PCH only),
 *	then clear the bit so the next reset can be observed.
 */
void
wm_lan_init_done(struct wm_softc *sc)
{
	uint32_t reg = 0;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_ICH10:
	case WM_T_PCH:
		/* Poll the status register every 100us until done. */
		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_LAN_INIT_DONE) != 0)
				break;
			delay(100);
		}
		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
			log(LOG_ERR, "%s: %s: lan_init_done failed to "
			    "complete\n", device_xname(sc->sc_dev), __func__);
		}
		break;
	default:
		/* Calling this on any other chip type is a driver bug. */
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* Acknowledge: write the bit back cleared. */
	reg &= ~STATUS_LAN_INIT_DONE;
	CSR_WRITE(sc, WMREG_STATUS, reg);
}
3849
/*
 * wm_get_cfg_done:
 *
 *	Wait for the post-reset configuration cycle to finish, using
 *	whichever mechanism (fixed delay, EEMNGCTL poll, STATUS_PHYRA
 *	handshake) the chip type requires.
 */
void
wm_get_cfg_done(struct wm_softc *sc)
{
	int func = 0;
	int mask;
	uint32_t reg;
	int i;

	/* wait for eeprom to reload */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* generic */
		delay(10*1000);
		break;
	case WM_T_80003:
	case WM_T_82571:
	case WM_T_82572:
		/* Poll the per-function CFGDONE bit in EEMNGCTL. */
		if (sc->sc_type == WM_T_80003)
			func = (CSR_READ(sc, WMREG_STATUS)
			    >> STATUS_FUNCID_SHIFT) & 1;
		else
			func = 0; /* XXX Is it true for 82571? */
		mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
				break;
			delay(1000);
		}
		if (i >= WM_PHY_CFG_TIMEOUT) {
			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
				device_xname(sc->sc_dev), __func__));
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* On PCH, clear the "PHY reset asserted" bit first. */
		if (sc->sc_type >= WM_T_PCH) {
			reg = CSR_READ(sc, WMREG_STATUS);
			if ((reg & STATUS_PHYRA) != 0)
				CSR_WRITE(sc, WMREG_STATUS,
				    reg & ~STATUS_PHYRA);
		}
		delay(10*1000);
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}
}
3918
3919 /*
3920 * wm_acquire_eeprom:
3921 *
3922 * Perform the EEPROM handshake required on some chips.
3923 */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	/* Take whichever software/firmware semaphore this chip uses. */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 1;
	}

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			/* Grant never arrived: back out the request bit. */
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			/*
			 * NOTE(review): unlike the acquire path above, the
			 * SWFWHW case here is not chained with "else" to the
			 * SWFW/SEMAPHORE cases (same pattern as
			 * wm_release_eeprom) -- presumably no chip sets both
			 * flags at once; confirm.
			 */
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return (1);
		}
	}

	return (0);
}
3981
3982 /*
3983 * wm_release_eeprom:
3984 *
3985 * Release the EEPROM mutex.
3986 */
static void
wm_release_eeprom(struct wm_softc *sc)
{
	uint32_t reg;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	/* Drop the EEPROM request bit if this chip uses the handshake. */
	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	/*
	 * Release whichever semaphore wm_acquire_eeprom() took.
	 * NOTE(review): the SWFWHW case is not "else"-chained to the
	 * other two -- presumably no chip sets both flags; confirm.
	 */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
4009
4010 /*
4011 * wm_eeprom_sendbits:
4012 *
4013 * Send a series of bits to the EEPROM.
4014 */
4015 static void
4016 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
4017 {
4018 uint32_t reg;
4019 int x;
4020
4021 reg = CSR_READ(sc, WMREG_EECD);
4022
4023 for (x = nbits; x > 0; x--) {
4024 if (bits & (1U << (x - 1)))
4025 reg |= EECD_DI;
4026 else
4027 reg &= ~EECD_DI;
4028 CSR_WRITE(sc, WMREG_EECD, reg);
4029 delay(2);
4030 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4031 delay(2);
4032 CSR_WRITE(sc, WMREG_EECD, reg);
4033 delay(2);
4034 }
4035 }
4036
4037 /*
4038 * wm_eeprom_recvbits:
4039 *
4040 * Receive a series of bits from the EEPROM.
4041 */
4042 static void
4043 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4044 {
4045 uint32_t reg, val;
4046 int x;
4047
4048 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4049
4050 val = 0;
4051 for (x = nbits; x > 0; x--) {
4052 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4053 delay(2);
4054 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4055 val |= (1U << (x - 1));
4056 CSR_WRITE(sc, WMREG_EECD, reg);
4057 delay(2);
4058 }
4059 *valp = val;
4060 }
4061
4062 /*
4063 * wm_read_eeprom_uwire:
4064 *
4065 * Read a word from the EEPROM using the MicroWire protocol.
4066 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	/* One full READ transaction (CS assert .. deassert) per word. */
	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	/* MicroWire reads cannot fail once we have the bus. */
	return (0);
}
4101
4102 /*
4103 * wm_spi_eeprom_ready:
4104 *
4105 * Wait for a SPI EEPROM to be ready for commands.
4106 */
4107 static int
4108 wm_spi_eeprom_ready(struct wm_softc *sc)
4109 {
4110 uint32_t val;
4111 int usec;
4112
4113 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4114 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4115 wm_eeprom_recvbits(sc, &val, 8);
4116 if ((val & SPI_SR_RDY) == 0)
4117 break;
4118 }
4119 if (usec >= SPI_MAX_RETRIES) {
4120 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4121 return (1);
4122 }
4123 return (0);
4124 }
4125
4126 /*
4127 * wm_read_eeprom_spi:
4128 *
 *	Read a word from the EEPROM using the SPI protocol.
4130 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* Bail out if the device never leaves its busy state. */
	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* 8-bit-address parts encode address bit 8 in the opcode. */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	/* Byte address = word address * 2, hence the shift. */
	wm_eeprom_sendbits(sc, opc, 8);
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	/* Sequential read; byte-swap each 16-bit value to host order. */
	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
4171
4172 #define EEPROM_CHECKSUM 0xBABA
4173 #define EEPROM_SIZE 0x0040
4174
4175 /*
4176 * wm_validate_eeprom_checksum
4177 *
4178 * The checksum is defined as the sum of the first 64 (16 bit) words.
4179 */
4180 static int
4181 wm_validate_eeprom_checksum(struct wm_softc *sc)
4182 {
4183 uint16_t checksum;
4184 uint16_t eeprom_data;
4185 int i;
4186
4187 checksum = 0;
4188
4189 for (i = 0; i < EEPROM_SIZE; i++) {
4190 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4191 return 1;
4192 checksum += eeprom_data;
4193 }
4194
4195 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4196 return 1;
4197
4198 return 0;
4199 }
4200
4201 /*
4202 * wm_read_eeprom:
4203 *
4204 * Read data from the serial EEPROM.
4205 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	/* EEPROM was marked unusable earlier; don't even try. */
	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_acquire_eeprom(sc))
		return 1;

	/* Dispatch to the access method this chip supports. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}
4230
/*
 * wm_read_eeprom_eerd:
 *
 *	Read "wordcnt" words from the NVM via the EERD register
 *	interface: write the word address plus the START bit, poll for
 *	completion, then extract the data field.  Returns 0 on success
 *	or the non-zero poll status on timeout.
 */
static int
wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
    uint16_t *data)
{
	int i, eerd = 0;
	int error = 0;

	for (i = 0; i < wordcnt; i++) {
		/* Kick off the read of word (offset + i). */
		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;

		CSR_WRITE(sc, WMREG_EERD, eerd);
		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
		if (error != 0)
			break;

		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
	}

	return error;
}
4251
4252 static int
4253 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4254 {
4255 uint32_t attempts = 100000;
4256 uint32_t i, reg = 0;
4257 int32_t done = -1;
4258
4259 for (i = 0; i < attempts; i++) {
4260 reg = CSR_READ(sc, rw);
4261
4262 if (reg & EERD_DONE) {
4263 done = 0;
4264 break;
4265 }
4266 delay(5);
4267 }
4268
4269 return done;
4270 }
4271
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor slot.
 *	Allocates a new mbuf cluster, loads it into the slot's DMA map
 *	(unloading any previous mapping), syncs the map for device
 *	reads, and re-initializes the hardware RX descriptor.
 *	Returns 0 on success or ENOBUFS if no mbuf/cluster is available.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	/* Attach a cluster; M_EXT clear means the cluster was not obtained. */
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Tear down the previous buffer's mapping before reusing the map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Make the whole cluster available for an incoming frame. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
4317
4318 /*
4319 * wm_set_ral:
4320 *
4321 * Set an entery in the receive address list.
4322 */
4323 static void
4324 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4325 {
4326 uint32_t ral_lo, ral_hi;
4327
4328 if (enaddr != NULL) {
4329 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4330 (enaddr[3] << 24);
4331 ral_hi = enaddr[4] | (enaddr[5] << 8);
4332 ral_hi |= RAL_AV;
4333 } else {
4334 ral_lo = 0;
4335 ral_hi = 0;
4336 }
4337
4338 if (sc->sc_type >= WM_T_82544) {
4339 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4340 ral_lo);
4341 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4342 ral_hi);
4343 } else {
4344 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4345 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4346 }
4347 }
4348
4349 /*
4350 * wm_mchash:
4351 *
4352 * Compute the hash of the multicast address for the 4096-bit
4353 * multicast filter.
4354 */
4355 static uint32_t
4356 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4357 {
4358 static const int lo_shift[4] = { 4, 3, 2, 0 };
4359 static const int hi_shift[4] = { 4, 5, 6, 8 };
4360 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4361 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4362 uint32_t hash;
4363
4364 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4365 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4366 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4367 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4368 return (hash & 0x3ff);
4369 }
4370 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4371 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4372
4373 return (hash & 0xfff);
4374 }
4375
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits in RCTL,
 *	the receive address list (station address in slot 0, the rest
 *	cleared), and the multicast hash table.  Falls back to
 *	all-multicast mode for promiscuous mode or multicast ranges.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* The multicast table moved on 82544 and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range. (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Upper hash bits select the 32-bit table word... */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		/* ...and the low 5 bits select the bit within that word. */
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??
		 * NOTE(review): (reg & 0xe) == 1 can never be true (0xe
		 * masks off bit 0), so this workaround branch is dead
		 * code.  Intel's documented 82544 errata workaround
		 * rewrites the adjacent register for odd-numbered MTA
		 * entries, suggesting (reg & 1) == 1 was intended —
		 * TODO confirm before changing behavior.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
4476
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X (fiber/TBI) devices:
 *	pick the transmit IPG, set up the ifmedia callbacks, configure
 *	the link-LED / loss-of-signal software-definable pins, and
 *	register the supported media types (1000baseSX HDX/FDX, auto).
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Autonegotiation retry interval, in wm_tbi_check_link() ticks. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Announce a media type on the console and add it to the ifmedia list. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
4530
4531 /*
4532 * wm_tbi_mediastatus: [ifmedia interface function]
4533 *
4534 * Get the current interface media status on a 1000BASE-X device.
4535 */
4536 static void
4537 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4538 {
4539 struct wm_softc *sc = ifp->if_softc;
4540 uint32_t ctrl, status;
4541
4542 ifmr->ifm_status = IFM_AVALID;
4543 ifmr->ifm_active = IFM_ETHER;
4544
4545 status = CSR_READ(sc, WMREG_STATUS);
4546 if ((status & STATUS_LU) == 0) {
4547 ifmr->ifm_active |= IFM_NONE;
4548 return;
4549 }
4550
4551 ifmr->ifm_status |= IFM_ACTIVE;
4552 ifmr->ifm_active |= IFM_1000_SX;
4553 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4554 ifmr->ifm_active |= IFM_FDX;
4555 ctrl = CSR_READ(sc, WMREG_CTRL);
4556 if (ctrl & CTRL_RFCE)
4557 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4558 if (ctrl & CTRL_TFCE)
4559 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4560 }
4561
/*
 * wm_tbi_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 *	Programs TXCW for autonegotiation or forced full-duplex, waits
 *	for signal/link, and on link-up mirrors the hardware's duplex
 *	and flow-control results into sc_tctl/sc_fcrtl.  Always
 *	returns 0.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = 0;
	/* Advertise pause capability when autonegotiating or FLOW is set. */
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the loss-of-signal input pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per iteration. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Mirror negotiated duplex/pause into TCTL/FCRTL. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
4679
4680 /*
4681 * wm_tbi_set_linkled:
4682 *
4683 * Update the link LED on 1000BASE-X devices.
4684 */
4685 static void
4686 wm_tbi_set_linkled(struct wm_softc *sc)
4687 {
4688
4689 if (sc->sc_tbi_linkup)
4690 sc->sc_ctrl |= CTRL_SWDPIN(0);
4691 else
4692 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4693
4694 /* 82540 or newer devices are active low */
4695 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4696
4697 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4698 }
4699
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices (called periodically).
 *	Tracks link transitions in sc_tbi_linkup, recovers from RXCFG
 *	storms by re-initializing the interface, and retries
 *	autonegotiation when the link stays down past sc_tbi_anegticks.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	/*
	 * NOTE(review): rxcw and ctrl are read but never used below —
	 * possibly kept for register-read side effects or leftover
	 * from earlier logic; confirm before removing.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			wm_init(ifp);
			wm_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
4762
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.  Acquires the appropriate hardware/firmware
 *	semaphore for the chip type, performs the chip-specific PHY
 *	reset sequence, releases the semaphore, waits for the
 *	configuration cycle to complete, and applies post-reset
 *	workarounds where needed.
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */
	int rv;

	/* get phy semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should get sw semaphore, too */
		rv = wm_get_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		/* Dual-port part: pick the semaphore for our function. */
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		rv = wm_get_swfw_semaphore(sc,
		    func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		rv = wm_get_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do*/
		rv = 0;
		break;
	}
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Assert reset (pin low), then deassert after 10ms. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* XXX extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		/* 82571+ needs only 100us in reset; older parts need 10ms. */
		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150);

		/* Allow time for h/w to get to a quiescent state after reset */
		delay(10*1000);

		/* XXX add code to set LED after phy reset */
		break;
	default:
		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
		    __func__);
		break;
	}

	/* release PHY semaphore */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		/* XXX should put sw semaphore, too */
		wm_put_swsm_semaphore(sc);
		break;
	case WM_T_80003:
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		wm_put_swfwhw_semaphore(sc);
		break;
	default:
		/* nothing to do*/
		rv = 0;
		break;
	}

	/* get_cfg_done */
	wm_get_cfg_done(sc);

	/* extra setup */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541_2:
	case WM_T_82547_2:
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		/* null */
		break;
	case WM_T_82541:
	case WM_T_82547:
		/* XXX Configure actively LED after PHY reset */
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		delay(10*1000);

		if (sc->sc_type == WM_T_PCH) {
			/* XXX hv_phy_workaround */

			/* dummy read from WUC */
		}
		/* XXX SW LCD configuration from NVM */

		if (sc->sc_type == WM_T_PCH) {
			/* XXX Configure the LCD with the OEM bits in NVM */

#if 1
			/*
			 * We should make the new driver for 8257[78] and
			 * move these code into it.
			 */
#define HV_OEM_BITS		((0 << 5) | 25)
#define HV_OEM_BITS_LPLU	(1 << 2)
#define HV_OEM_BITS_A1KDIS	(1 << 6)
#define HV_OEM_BITS_ANEGNOW	(1 << 10)
#endif
			/*
			 * Disable LPLU.
			 * XXX It seems that 82567 has LPLU, too.
			 */
			reg = wm_gmii_kv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
			reg |= HV_OEM_BITS_ANEGNOW;
			wm_gmii_kv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
		}
		break;
	default:
		panic("%s: unknown type\n", __func__);
		break;
	}
}
4983
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T (copper) devices:
 *	select the transmit IPG, let the MAC follow the PHY for
 *	speed/duplex, pick the MII read/write accessors by PCI product
 *	ID and chip type, reset the PHY, and attach the MII layer
 *	(retrying with the BM accessors if no PHY is found).
 */
static void
wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	switch (prodid) {
	case PCI_PRODUCT_INTEL_PCH_M_LM:
	case PCI_PRODUCT_INTEL_PCH_M_LC:
	case PCI_PRODUCT_INTEL_PCH_D_DM:
	case PCI_PRODUCT_INTEL_PCH_D_DC:
		/* 82577 or 82578 */
		sc->sc_mii.mii_readreg = wm_gmii_kv_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_kv_writereg;
		break;
	case PCI_PRODUCT_INTEL_82801I_BM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
		/* 82567 */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
		break;
	default:
		/* Fall back on chip generation for everything else. */
		if (sc->sc_type >= WM_T_80003) {
			sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
		} else if (sc->sc_type >= WM_T_82544) {
			sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
		} else {
			sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
			sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
		}
		break;

	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* Still no PHY: expose a "none" medium only. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
5072
5073 /*
5074 * wm_gmii_mediastatus: [ifmedia interface function]
5075 *
5076 * Get the current interface media status on a 1000BASE-T device.
5077 */
5078 static void
5079 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5080 {
5081 struct wm_softc *sc = ifp->if_softc;
5082
5083 ether_mediastatus(ifp, ifmr);
5084 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5085 sc->sc_flowflags;
5086 }
5087
/*
 * wm_gmii_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 *	Chips newer than 82543 (or autoselect) let the MAC follow the
 *	PHY; otherwise the MAC's speed and duplex are forced to the
 *	selected medium.  Returns 0 on success or the mii_mediachg()
 *	error (ENXIO, meaning no PHY, is treated as success).
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* Force MAC speed/duplex to match the fixed selection. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch(IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
5136
/* Software-defined pins used to bit-bang MDIO on the i82543. */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang the top "nbits" bits of "data" out the MDIO pin,
 *	MSB first, clocking MDI_CLK high then low for each bit with
 *	10us setup/hold delays.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Drive MDIO as an output; MDI_CLK pin is always an output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
5163
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the MDIO pin, MSB first.
 *	The MDIO pin is switched to an input, a turnaround clock cycle
 *	is issued, then 16 bits are sampled on the clock-high phase,
 *	followed by one trailing idle clock.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* MDIO becomes an input (MDI_DIR not set); clock stays an output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround cycle before the PHY starts driving data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One idle clock to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
5201
/*
 * wm_gmii_i82543_readreg: [mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version), by
 *	bit-banging a standard MDIO read frame: 32-bit preamble, then
 *	start/read/PHY-address/register fields, then 16 data bits in.
 *	Returns the 16-bit register value.
 */
static int
wm_gmii_i82543_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    device_xname(sc->sc_dev), phy, reg, rv));

	return (rv);
}
5224
/*
 * wm_gmii_i82543_writereg: [mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version), by
 *	bit-banging a standard MDIO write frame: 32-bit preamble, then
 *	start/write/PHY-address/register/ack/data as one 32-bit field.
 */
static void
wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
5240
/*
 * wm_gmii_i82544_readreg: [mii interface function]
 *
 *	Read a PHY register on the GMII via the MDIC register: issue
 *	the read op, poll (up to 320 * 10us) for READY, then return the
 *	data field.  Returns 0 on timeout, MDIC error, or an all-ones
 *	read (no PHY responding).
 */
static int
wm_gmii_i82544_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* 0xffff means nothing drove the bus; report "no data". */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
5281
/*
 * wm_gmii_i82544_writereg: [mii interface function]
 *
 *	Write a PHY register on the GMII via the MDIC register: issue
 *	the write op with the data, then poll (up to 320 * 10us) for
 *	READY.  Timeouts and MDIC errors are logged but not returned.
 */
static void
wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
}
5311
/*
 * wm_gmii_i80003_readreg: [mii interface function]
 *
 *	Read a PHY register on the kumeran (80003 GG82563 PHY).
 *	Selects the register page first, then reads through the MDIC
 *	interface.  This could be handled by the PHY layer if we didn't
 *	have to lock the resource ...  Returns the register value, or
 *	0 if phy != 1 or the semaphore could not be obtained.
 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/* High register addresses use the alternate page-select register. */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
5350
/*
 * wm_gmii_i80003_writereg: [mii interface function]
 *
 *	Write a PHY register on the kumeran (80003 GG82563 PHY).
 *	Selects the register page first, then writes through the MDIC
 *	interface.  This could be handled by the PHY layer if we didn't
 *	have to lock the resource ...
 */
static void
wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/* High register addresses use the alternate page-select register. */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
5387
5388 /*
5389 * wm_gmii_bm_readreg: [mii interface function]
5390 *
5391 * Read a PHY register on the kumeran
5392 * This could be handled by the PHY layer if we didn't have to lock the
5393 * ressource ...
5394 */
5395 static int
5396 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5397 {
5398 struct wm_softc *sc = device_private(self);
5399 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5400 int rv;
5401
5402 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5403 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5404 __func__);
5405 return 0;
5406 }
5407
5408 if (reg > GG82563_MAX_REG_ADDRESS) {
5409 if (phy == 1)
5410 wm_gmii_i82544_writereg(self, phy, 0x1f,
5411 reg);
5412 else
5413 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5414 reg >> GG82563_PAGE_SHIFT);
5415
5416 }
5417
5418 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5419 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5420 return (rv);
5421 }
5422
5423 /*
5424 * wm_gmii_bm_writereg: [mii interface function]
5425 *
5426 * Write a PHY register on the kumeran.
5427 * This could be handled by the PHY layer if we didn't have to lock the
5428 * ressource ...
5429 */
5430 static void
5431 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5432 {
5433 struct wm_softc *sc = device_private(self);
5434 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5435
5436 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5437 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5438 __func__);
5439 return;
5440 }
5441
5442 if (reg > GG82563_MAX_REG_ADDRESS) {
5443 if (phy == 1)
5444 wm_gmii_i82544_writereg(self, phy, 0x1f,
5445 reg);
5446 else
5447 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5448 reg >> GG82563_PAGE_SHIFT);
5449
5450 }
5451
5452 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5453 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5454 }
5455
5456 /*
5457 * wm_gmii_kv_readreg: [mii interface function]
5458 *
5459 * Read a PHY register on the kumeran
5460 * This could be handled by the PHY layer if we didn't have to lock the
5461 * ressource ...
5462 */
5463 static int
5464 wm_gmii_kv_readreg(device_t self, int phy, int reg)
5465 {
5466 struct wm_softc *sc = device_private(self);
5467 int rv;
5468
5469 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5470 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5471 __func__);
5472 return 0;
5473 }
5474
5475 if (reg > GG82563_MAX_REG_ADDRESS) {
5476 printf("XXX rd pagesel\n");
5477 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5478 reg & IGPHY_PAGEMASK);
5479 }
5480
5481 rv = wm_gmii_i82544_readreg(self, phy, reg & IGPHY_MAXREGADDR);
5482 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5483 return (rv);
5484 }
5485
5486 /*
5487 * wm_gmii_kv_writereg: [mii interface function]
5488 *
5489 * Write a PHY register on the kumeran.
5490 * This could be handled by the PHY layer if we didn't have to lock the
5491 * ressource ...
5492 */
5493 static void
5494 wm_gmii_kv_writereg(device_t self, int phy, int reg, int val)
5495 {
5496 struct wm_softc *sc = device_private(self);
5497
5498 if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
5499 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5500 __func__);
5501 return;
5502 }
5503
5504 if (reg > GG82563_MAX_REG_ADDRESS) {
5505 printf("XXX wr pagesel\n");
5506 wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
5507 reg & IGPHY_PAGEMASK);
5508 }
5509
5510 wm_gmii_i82544_writereg(self, phy, reg & IGPHY_MAXREGADDR, val);
5511 wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
5512 }
5513
5514 /*
5515 * wm_gmii_statchg: [mii interface function]
5516 *
5517 * Callback from MII layer when media changes.
5518 */
5519 static void
5520 wm_gmii_statchg(device_t self)
5521 {
5522 struct wm_softc *sc = device_private(self);
5523 struct mii_data *mii = &sc->sc_mii;
5524
5525 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5526 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5527 sc->sc_fcrtl &= ~FCRTL_XONE;
5528
5529 /*
5530 * Get flow control negotiation result.
5531 */
5532 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5533 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5534 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5535 mii->mii_media_active &= ~IFM_ETH_FMASK;
5536 }
5537
5538 if (sc->sc_flowflags & IFM_FLOW) {
5539 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5540 sc->sc_ctrl |= CTRL_TFCE;
5541 sc->sc_fcrtl |= FCRTL_XONE;
5542 }
5543 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5544 sc->sc_ctrl |= CTRL_RFCE;
5545 }
5546
5547 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5548 DPRINTF(WM_DEBUG_LINK,
5549 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5550 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5551 } else {
5552 DPRINTF(WM_DEBUG_LINK,
5553 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5554 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5555 }
5556
5557 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5558 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5559 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5560 : WMREG_FCRTL, sc->sc_fcrtl);
5561 if (sc->sc_type == WM_T_80003) {
5562 switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5563 case IFM_1000_T:
5564 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5565 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5566 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5567 break;
5568 default:
5569 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5570 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5571 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5572 break;
5573 }
5574 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5575 }
5576 }
5577
5578 /*
5579 * wm_kmrn_readreg:
5580 *
5581 * Read a kumeran register
5582 */
5583 static int
5584 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5585 {
5586 int rv;
5587
5588 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5589 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5590 aprint_error_dev(sc->sc_dev,
5591 "%s: failed to get semaphore\n", __func__);
5592 return 0;
5593 }
5594 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5595 if (wm_get_swfwhw_semaphore(sc)) {
5596 aprint_error_dev(sc->sc_dev,
5597 "%s: failed to get semaphore\n", __func__);
5598 return 0;
5599 }
5600 }
5601
5602 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5603 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5604 KUMCTRLSTA_REN);
5605 delay(2);
5606
5607 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5608
5609 if (sc->sc_flags == WM_F_SWFW_SYNC)
5610 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5611 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5612 wm_put_swfwhw_semaphore(sc);
5613
5614 return (rv);
5615 }
5616
5617 /*
5618 * wm_kmrn_writereg:
5619 *
5620 * Write a kumeran register
5621 */
5622 static void
5623 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5624 {
5625
5626 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5627 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5628 aprint_error_dev(sc->sc_dev,
5629 "%s: failed to get semaphore\n", __func__);
5630 return;
5631 }
5632 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5633 if (wm_get_swfwhw_semaphore(sc)) {
5634 aprint_error_dev(sc->sc_dev,
5635 "%s: failed to get semaphore\n", __func__);
5636 return;
5637 }
5638 }
5639
5640 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5641 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5642 (val & KUMCTRLSTA_MASK));
5643
5644 if (sc->sc_flags == WM_F_SWFW_SYNC)
5645 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5646 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5647 wm_put_swfwhw_semaphore(sc);
5648 }
5649
5650 static int
5651 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5652 {
5653 uint32_t eecd = 0;
5654
5655 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5656 || sc->sc_type == WM_T_82583) {
5657 eecd = CSR_READ(sc, WMREG_EECD);
5658
5659 /* Isolate bits 15 & 16 */
5660 eecd = ((eecd >> 15) & 0x03);
5661
5662 /* If both bits are set, device is Flash type */
5663 if (eecd == 0x03)
5664 return 0;
5665 }
5666 return 1;
5667 }
5668
5669 static int
5670 wm_get_swsm_semaphore(struct wm_softc *sc)
5671 {
5672 int32_t timeout;
5673 uint32_t swsm;
5674
5675 /* Get the FW semaphore. */
5676 timeout = 1000 + 1; /* XXX */
5677 while (timeout) {
5678 swsm = CSR_READ(sc, WMREG_SWSM);
5679 swsm |= SWSM_SWESMBI;
5680 CSR_WRITE(sc, WMREG_SWSM, swsm);
5681 /* if we managed to set the bit we got the semaphore. */
5682 swsm = CSR_READ(sc, WMREG_SWSM);
5683 if (swsm & SWSM_SWESMBI)
5684 break;
5685
5686 delay(50);
5687 timeout--;
5688 }
5689
5690 if (timeout == 0) {
5691 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5692 /* Release semaphores */
5693 wm_put_swsm_semaphore(sc);
5694 return 1;
5695 }
5696 return 0;
5697 }
5698
5699 static void
5700 wm_put_swsm_semaphore(struct wm_softc *sc)
5701 {
5702 uint32_t swsm;
5703
5704 swsm = CSR_READ(sc, WMREG_SWSM);
5705 swsm &= ~(SWSM_SWESMBI);
5706 CSR_WRITE(sc, WMREG_SWSM, swsm);
5707 }
5708
5709 static int
5710 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5711 {
5712 uint32_t swfw_sync;
5713 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5714 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5715 int timeout = 200;
5716
5717 for(timeout = 0; timeout < 200; timeout++) {
5718 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5719 if (wm_get_swsm_semaphore(sc)) {
5720 aprint_error_dev(sc->sc_dev,
5721 "%s: failed to get semaphore\n",
5722 __func__);
5723 return 1;
5724 }
5725 }
5726 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5727 if ((swfw_sync & (swmask | fwmask)) == 0) {
5728 swfw_sync |= swmask;
5729 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5730 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5731 wm_put_swsm_semaphore(sc);
5732 return 0;
5733 }
5734 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5735 wm_put_swsm_semaphore(sc);
5736 delay(5000);
5737 }
5738 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5739 device_xname(sc->sc_dev), mask, swfw_sync);
5740 return 1;
5741 }
5742
5743 static void
5744 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5745 {
5746 uint32_t swfw_sync;
5747
5748 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5749 while (wm_get_swsm_semaphore(sc) != 0)
5750 continue;
5751 }
5752 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5753 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5754 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5755 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5756 wm_put_swsm_semaphore(sc);
5757 }
5758
5759 static int
5760 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5761 {
5762 uint32_t ext_ctrl;
5763 int timeout = 200;
5764
5765 for(timeout = 0; timeout < 200; timeout++) {
5766 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5767 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5768 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5769
5770 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5771 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5772 return 0;
5773 delay(5000);
5774 }
5775 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5776 device_xname(sc->sc_dev), ext_ctrl);
5777 return 1;
5778 }
5779
5780 static void
5781 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5782 {
5783 uint32_t ext_ctrl;
5784 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5785 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5786 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5787 }
5788
5789 static int
5790 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5791 {
5792 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5793 uint8_t bank_high_byte;
5794 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5795
5796 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5797 /* Value of bit 22 corresponds to the flash bank we're on. */
5798 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5799 } else {
5800 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5801 if ((bank_high_byte & 0xc0) == 0x80)
5802 *bank = 0;
5803 else {
5804 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5805 &bank_high_byte);
5806 if ((bank_high_byte & 0xc0) == 0x80)
5807 *bank = 1;
5808 else {
5809 aprint_error_dev(sc->sc_dev,
5810 "EEPROM not present\n");
5811 return -1;
5812 }
5813 }
5814 }
5815
5816 return 0;
5817 }
5818
5819 /******************************************************************************
5820 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5821 * register.
5822 *
5823 * sc - Struct containing variables accessed by shared code
5824 * offset - offset of word in the EEPROM to read
5825 * data - word read from the EEPROM
5826 * words - number of words to read
5827 *****************************************************************************/
5828 static int
5829 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5830 {
5831 int32_t error = 0;
5832 uint32_t flash_bank = 0;
5833 uint32_t act_offset = 0;
5834 uint32_t bank_offset = 0;
5835 uint16_t word = 0;
5836 uint16_t i = 0;
5837
5838 /* We need to know which is the valid flash bank. In the event
5839 * that we didn't allocate eeprom_shadow_ram, we may not be
5840 * managing flash_bank. So it cannot be trusted and needs
5841 * to be updated with each read.
5842 */
5843 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5844 if (error) {
5845 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5846 __func__);
5847 return error;
5848 }
5849
5850 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5851 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5852
5853 error = wm_get_swfwhw_semaphore(sc);
5854 if (error) {
5855 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5856 __func__);
5857 return error;
5858 }
5859
5860 for (i = 0; i < words; i++) {
5861 /* The NVM part needs a byte offset, hence * 2 */
5862 act_offset = bank_offset + ((offset + i) * 2);
5863 error = wm_read_ich8_word(sc, act_offset, &word);
5864 if (error) {
5865 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5866 __func__);
5867 break;
5868 }
5869 data[i] = word;
5870 }
5871
5872 wm_put_swfwhw_semaphore(sc);
5873 return error;
5874 }
5875
5876 /******************************************************************************
5877 * This function does initial flash setup so that a new read/write/erase cycle
5878 * can be started.
5879 *
5880 * sc - The pointer to the hw structure
5881 ****************************************************************************/
5882 static int32_t
5883 wm_ich8_cycle_init(struct wm_softc *sc)
5884 {
5885 uint16_t hsfsts;
5886 int32_t error = 1;
5887 int32_t i = 0;
5888
5889 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5890
5891 /* May be check the Flash Des Valid bit in Hw status */
5892 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5893 return error;
5894 }
5895
5896 /* Clear FCERR in Hw status by writing 1 */
5897 /* Clear DAEL in Hw status by writing a 1 */
5898 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5899
5900 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5901
5902 /* Either we should have a hardware SPI cycle in progress bit to check
5903 * against, in order to start a new cycle or FDONE bit should be changed
5904 * in the hardware so that it is 1 after harware reset, which can then be
5905 * used as an indication whether a cycle is in progress or has been
5906 * completed .. we should also have some software semaphore mechanism to
5907 * guard FDONE or the cycle in progress bit so that two threads access to
5908 * those bits can be sequentiallized or a way so that 2 threads dont
5909 * start the cycle at the same time */
5910
5911 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5912 /* There is no cycle running at present, so we can start a cycle */
5913 /* Begin by setting Flash Cycle Done. */
5914 hsfsts |= HSFSTS_DONE;
5915 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5916 error = 0;
5917 } else {
5918 /* otherwise poll for sometime so the current cycle has a chance
5919 * to end before giving up. */
5920 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5921 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5922 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5923 error = 0;
5924 break;
5925 }
5926 delay(1);
5927 }
5928 if (error == 0) {
5929 /* Successful in waiting for previous cycle to timeout,
5930 * now set the Flash Cycle Done. */
5931 hsfsts |= HSFSTS_DONE;
5932 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5933 }
5934 }
5935 return error;
5936 }
5937
5938 /******************************************************************************
5939 * This function starts a flash cycle and waits for its completion
5940 *
5941 * sc - The pointer to the hw structure
5942 ****************************************************************************/
5943 static int32_t
5944 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5945 {
5946 uint16_t hsflctl;
5947 uint16_t hsfsts;
5948 int32_t error = 1;
5949 uint32_t i = 0;
5950
5951 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5952 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5953 hsflctl |= HSFCTL_GO;
5954 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5955
5956 /* wait till FDONE bit is set to 1 */
5957 do {
5958 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5959 if (hsfsts & HSFSTS_DONE)
5960 break;
5961 delay(1);
5962 i++;
5963 } while (i < timeout);
5964 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
5965 error = 0;
5966 }
5967 return error;
5968 }
5969
5970 /******************************************************************************
5971 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5972 *
5973 * sc - The pointer to the hw structure
5974 * index - The index of the byte or word to read.
5975 * size - Size of data to read, 1=byte 2=word
5976 * data - Pointer to the word to store the value read.
5977 *****************************************************************************/
5978 static int32_t
5979 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5980 uint32_t size, uint16_t* data)
5981 {
5982 uint16_t hsfsts;
5983 uint16_t hsflctl;
5984 uint32_t flash_linear_address;
5985 uint32_t flash_data = 0;
5986 int32_t error = 1;
5987 int32_t count = 0;
5988
5989 if (size < 1 || size > 2 || data == 0x0 ||
5990 index > ICH_FLASH_LINEAR_ADDR_MASK)
5991 return error;
5992
5993 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5994 sc->sc_ich8_flash_base;
5995
5996 do {
5997 delay(1);
5998 /* Steps */
5999 error = wm_ich8_cycle_init(sc);
6000 if (error)
6001 break;
6002
6003 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
6004 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
6005 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
6006 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
6007 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
6008
6009 /* Write the last 24 bits of index into Flash Linear address field in
6010 * Flash Address */
6011 /* TODO: TBD maybe check the index against the size of flash */
6012
6013 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
6014
6015 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
6016
6017 /* Check if FCERR is set to 1, if set to 1, clear it and try the whole
6018 * sequence a few more times, else read in (shift in) the Flash Data0,
6019 * the order is least significant byte first msb to lsb */
6020 if (error == 0) {
6021 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
6022 if (size == 1) {
6023 *data = (uint8_t)(flash_data & 0x000000FF);
6024 } else if (size == 2) {
6025 *data = (uint16_t)(flash_data & 0x0000FFFF);
6026 }
6027 break;
6028 } else {
6029 /* If we've gotten here, then things are probably completely hosed,
6030 * but if the error condition is detected, it won't hurt to give
6031 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
6032 */
6033 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
6034 if (hsfsts & HSFSTS_ERR) {
6035 /* Repeat for some time before giving up. */
6036 continue;
6037 } else if ((hsfsts & HSFSTS_DONE) == 0) {
6038 break;
6039 }
6040 }
6041 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
6042
6043 return error;
6044 }
6045
6046 /******************************************************************************
6047 * Reads a single byte from the NVM using the ICH8 flash access registers.
6048 *
6049 * sc - pointer to wm_hw structure
6050 * index - The index of the byte to read.
6051 * data - Pointer to a byte to store the value read.
6052 *****************************************************************************/
6053 static int32_t
6054 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
6055 {
6056 int32_t status;
6057 uint16_t word = 0;
6058
6059 status = wm_read_ich8_data(sc, index, 1, &word);
6060 if (status == 0) {
6061 *data = (uint8_t)word;
6062 }
6063
6064 return status;
6065 }
6066
6067 /******************************************************************************
6068 * Reads a word from the NVM using the ICH8 flash access registers.
6069 *
6070 * sc - pointer to wm_hw structure
6071 * index - The starting byte index of the word to read.
6072 * data - Pointer to a word to store the value read.
6073 *****************************************************************************/
6074 static int32_t
6075 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
6076 {
6077 int32_t status;
6078
6079 status = wm_read_ich8_data(sc, index, 2, data);
6080 return status;
6081 }
6082
6083 static int
6084 wm_check_mng_mode(struct wm_softc *sc)
6085 {
6086 int rv;
6087
6088 switch (sc->sc_type) {
6089 case WM_T_ICH8:
6090 case WM_T_ICH9:
6091 case WM_T_ICH10:
6092 case WM_T_PCH:
6093 rv = wm_check_mng_mode_ich8lan(sc);
6094 break;
6095 case WM_T_82574:
6096 case WM_T_82583:
6097 rv = wm_check_mng_mode_82574(sc);
6098 break;
6099 case WM_T_82571:
6100 case WM_T_82572:
6101 case WM_T_82573:
6102 case WM_T_80003:
6103 rv = wm_check_mng_mode_generic(sc);
6104 break;
6105 default:
6106 /* noting to do */
6107 rv = 0;
6108 break;
6109 }
6110
6111 return rv;
6112 }
6113
6114 static int
6115 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6116 {
6117 uint32_t fwsm;
6118
6119 fwsm = CSR_READ(sc, WMREG_FWSM);
6120
6121 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6122 return 1;
6123
6124 return 0;
6125 }
6126
6127 static int
6128 wm_check_mng_mode_82574(struct wm_softc *sc)
6129 {
6130 uint16_t data;
6131
6132 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6133
6134 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6135 return 1;
6136
6137 return 0;
6138 }
6139
6140 static int
6141 wm_check_mng_mode_generic(struct wm_softc *sc)
6142 {
6143 uint32_t fwsm;
6144
6145 fwsm = CSR_READ(sc, WMREG_FWSM);
6146
6147 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6148 return 1;
6149
6150 return 0;
6151 }
6152
6153 static int
6154 wm_check_reset_block(struct wm_softc *sc)
6155 {
6156 uint32_t reg;
6157
6158 switch (sc->sc_type) {
6159 case WM_T_ICH8:
6160 case WM_T_ICH9:
6161 case WM_T_ICH10:
6162 case WM_T_PCH:
6163 reg = CSR_READ(sc, WMREG_FWSM);
6164 if ((reg & FWSM_RSPCIPHY) != 0)
6165 return 0;
6166 else
6167 return -1;
6168 break;
6169 case WM_T_82571:
6170 case WM_T_82572:
6171 case WM_T_82573:
6172 case WM_T_82574:
6173 case WM_T_82583:
6174 case WM_T_80003:
6175 reg = CSR_READ(sc, WMREG_MANC);
6176 if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
6177 return -1;
6178 else
6179 return 0;
6180 break;
6181 default:
6182 /* no problem */
6183 break;
6184 }
6185
6186 return 0;
6187 }
6188
6189 static void
6190 wm_get_hw_control(struct wm_softc *sc)
6191 {
6192 uint32_t reg;
6193
6194 switch (sc->sc_type) {
6195 case WM_T_82573:
6196 #if 0
6197 case WM_T_82574:
6198 case WM_T_82583:
6199 /*
6200 * FreeBSD's em driver has the function for 82574 to checks
6201 * the management mode, but it's not used. Why?
6202 */
6203 #endif
6204 reg = CSR_READ(sc, WMREG_SWSM);
6205 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6206 break;
6207 case WM_T_82571:
6208 case WM_T_82572:
6209 case WM_T_80003:
6210 case WM_T_ICH8:
6211 case WM_T_ICH9:
6212 case WM_T_ICH10:
6213 case WM_T_PCH:
6214 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6215 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6216 break;
6217 default:
6218 break;
6219 }
6220 }
6221
6222 /* XXX Currently TBI only */
6223 static int
6224 wm_check_for_link(struct wm_softc *sc)
6225 {
6226 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6227 uint32_t rxcw;
6228 uint32_t ctrl;
6229 uint32_t status;
6230 uint32_t sig;
6231
6232 rxcw = CSR_READ(sc, WMREG_RXCW);
6233 ctrl = CSR_READ(sc, WMREG_CTRL);
6234 status = CSR_READ(sc, WMREG_STATUS);
6235
6236 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6237
6238 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6239 device_xname(sc->sc_dev), __func__,
6240 ((ctrl & CTRL_SWDPIN(1)) == sig),
6241 ((status & STATUS_LU) != 0),
6242 ((rxcw & RXCW_C) != 0)
6243 ));
6244
6245 /*
6246 * SWDPIN LU RXCW
6247 * 0 0 0
6248 * 0 0 1 (should not happen)
6249 * 0 1 0 (should not happen)
6250 * 0 1 1 (should not happen)
6251 * 1 0 0 Disable autonego and force linkup
6252 * 1 0 1 got /C/ but not linkup yet
6253 * 1 1 0 (linkup)
6254 * 1 1 1 If IFM_AUTO, back to autonego
6255 *
6256 */
6257 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6258 && ((status & STATUS_LU) == 0)
6259 && ((rxcw & RXCW_C) == 0)) {
6260 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6261 __func__));
6262 sc->sc_tbi_linkup = 0;
6263 /* Disable auto-negotiation in the TXCW register */
6264 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6265
6266 /*
6267 * Force link-up and also force full-duplex.
6268 *
6269 * NOTE: CTRL was updated TFCE and RFCE automatically,
6270 * so we should update sc->sc_ctrl
6271 */
6272 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6273 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6274 } else if(((status & STATUS_LU) != 0)
6275 && ((rxcw & RXCW_C) != 0)
6276 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6277 sc->sc_tbi_linkup = 1;
6278 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6279 __func__));
6280 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6281 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6282 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6283 && ((rxcw & RXCW_C) != 0)) {
6284 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6285 } else {
6286 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6287 status));
6288 }
6289
6290 return 0;
6291 }
6292