/*	$NetBSD: if_wm.c,v 1.190 2010/01/11 12:29:28 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.190 2010/01/11 12:29:28 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

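/*
 * Helpers for the software Rx mbuf chain: sc_rxtailp always points
 * at the m_next slot where the next buffer will be linked in, so
 * appending is O(1) and a reset simply re-aims it at sc_rxhead.
 */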
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

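/*
 * 32-bit halves of a control-data DMA address, for loading the 64-bit
 * descriptor base registers; the high half is 0 when bus addresses
 * are only 32 bits wide.
 */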
#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

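/*
 * Initialize Rx descriptor (x) with a fresh mbuf buffer and hand it
 * back to the chip by advancing the Rx Descriptor Tail register.
 */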
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82578LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82578LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

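/*
 * I/O-space register access for the i82544 and later: offset 0 of
 * the I/O BAR is an address window and offset 4 a data window, so
 * every access is a two-step indirect cycle.  Only used for chip-bug
 * work-arounds (see the I/O BAR probing in wm_attach()).
 */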
#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

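/*
 * Store a bus address into the two little-endian 32-bit words of a
 * wiseman descriptor address field.
 */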
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

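/*
 * SPI EEPROMs advertise their address width in EECD; record it so
 * the EEPROM read routines clock out the right number of address
 * bits.
 */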
static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device. All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access. It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering. Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
	case WM_T_PCH:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
	case WM_T_PCH:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
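		/*
		 * GFPREG gives the first and last sectors of the flash
		 * region in ICH_FLASH_SECTOR_SIZE units.  The region
		 * apparently holds two NVM banks, and we keep the bank
		 * size in 16-bit words, hence the final divide.
		 */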
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check it a second time, because some PCI-e parts fail
		 * the first check due to the link being in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
	    || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_PCH:
		/* XXX limited to 4096 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
1791 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1792 uint32_t ipcse;
1793 struct ether_header *eh;
1794 int offset, iphl;
1795 uint8_t fields;
1796
1797 /*
1798 * XXX It would be nice if the mbuf pkthdr had offset
1799 * fields for the protocol headers.
1800 */
1801
1802 eh = mtod(m0, struct ether_header *);
1803 switch (htons(eh->ether_type)) {
1804 case ETHERTYPE_IP:
1805 case ETHERTYPE_IPV6:
1806 offset = ETHER_HDR_LEN;
1807 break;
1808
1809 case ETHERTYPE_VLAN:
1810 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1811 break;
1812
1813 default:
1814 /*
1815 * Don't support this protocol or encapsulation.
1816 */
1817 *fieldsp = 0;
1818 *cmdp = 0;
1819 return (0);
1820 }
1821
1822 if ((m0->m_pkthdr.csum_flags &
1823 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1824 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1825 } else {
1826 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1827 }
1828 ipcse = offset + iphl - 1;
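	/*
	 * Worked example (illustrative): for an untagged IPv4 frame with
	 * a 20-byte IP header, offset = ETHER_HDR_LEN (14) and iphl = 20,
	 * so IPCSS = 14, IPCSE = 14 + 20 - 1 = 33, and IPCSO (set below)
	 * is 14 + offsetof(struct ip, ip_sum) = 14 + 10 = 24 -- i.e. the
	 * chip checksums bytes 14..33 and stores the result at offset 24.
	 */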
1829
1830 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1831 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1832 seg = 0;
1833 fields = 0;
1834
1835 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1836 int hlen = offset + iphl;
1837 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1838
1839 if (__predict_false(m0->m_len <
1840 (hlen + sizeof(struct tcphdr)))) {
1841 /*
1842 * TCP/IP headers are not in the first mbuf; we need
1843 * to do this the slow and painful way. Let's just
1844 * hope this doesn't happen very often.
1845 */
1846 struct tcphdr th;
1847
1848 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1849
1850 m_copydata(m0, hlen, sizeof(th), &th);
1851 if (v4) {
1852 struct ip ip;
1853
1854 m_copydata(m0, offset, sizeof(ip), &ip);
1855 ip.ip_len = 0;
1856 m_copyback(m0,
1857 offset + offsetof(struct ip, ip_len),
1858 sizeof(ip.ip_len), &ip.ip_len);
1859 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1860 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1861 } else {
1862 struct ip6_hdr ip6;
1863
1864 m_copydata(m0, offset, sizeof(ip6), &ip6);
1865 ip6.ip6_plen = 0;
1866 m_copyback(m0,
1867 offset + offsetof(struct ip6_hdr, ip6_plen),
1868 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1869 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1870 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1871 }
1872 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1873 sizeof(th.th_sum), &th.th_sum);
1874
1875 hlen += th.th_off << 2;
1876 } else {
1877 /*
1878 * TCP/IP headers are in the first mbuf; we can do
1879 * this the easy way.
1880 */
1881 struct tcphdr *th;
1882
1883 if (v4) {
1884 struct ip *ip =
1885 (void *)(mtod(m0, char *) + offset);
1886 th = (void *)(mtod(m0, char *) + hlen);
1887
1888 ip->ip_len = 0;
1889 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1890 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1891 } else {
1892 struct ip6_hdr *ip6 =
1893 (void *)(mtod(m0, char *) + offset);
1894 th = (void *)(mtod(m0, char *) + hlen);
1895
1896 ip6->ip6_plen = 0;
1897 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1898 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1899 }
1900 hlen += th->th_off << 2;
1901 }
1902
1903 if (v4) {
1904 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1905 cmdlen |= WTX_TCPIP_CMD_IP;
1906 } else {
1907 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1908 ipcse = 0;
1909 }
1910 cmd |= WTX_TCPIP_CMD_TSE;
1911 cmdlen |= WTX_TCPIP_CMD_TSE |
1912 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1913 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1914 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1915 }
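	/*
	 * Worked TSO arithmetic (illustrative): for an untagged TCP/IPv4
	 * packet with 20-byte IP and TCP headers, hlen = 14 + 20 + 20 =
	 * 54, so the context descriptor carries HDRLEN = 54, MSS =
	 * m0->m_pkthdr.segsz (e.g. 1460), and a payload length of
	 * m0->m_pkthdr.len - 54; the chip then re-slices that payload
	 * into MSS-sized frames on the wire.
	 */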
1916
1917 /*
1918 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1919 * offload feature, if we load the context descriptor, we
1920 * MUST provide valid values for IPCSS and TUCSS fields.
1921 */
1922
1923 ipcs = WTX_TCPIP_IPCSS(offset) |
1924 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1925 WTX_TCPIP_IPCSE(ipcse);
1926 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1927 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1928 fields |= WTX_IXSM;
1929 }
1930
1931 offset += iphl;
1932
1933 if (m0->m_pkthdr.csum_flags &
1934 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1935 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1936 fields |= WTX_TXSM;
1937 tucs = WTX_TCPIP_TUCSS(offset) |
1938 WTX_TCPIP_TUCSO(offset +
1939 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1940 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1941 } else if ((m0->m_pkthdr.csum_flags &
1942 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1943 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1944 fields |= WTX_TXSM;
1945 tucs = WTX_TCPIP_TUCSS(offset) |
1946 WTX_TCPIP_TUCSO(offset +
1947 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1948 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1949 } else {
1950 /* Just initialize it to a valid TCP context. */
1951 tucs = WTX_TCPIP_TUCSS(offset) |
1952 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1953 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1954 }
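	/*
	 * Worked example (illustrative): for a UDPv4 checksum on an
	 * untagged frame with a 20-byte IP header, offset is now
	 * 14 + 20 = 34, and M_CSUM_DATA_IPv4_OFFSET() yields
	 * offsetof(struct udphdr, uh_sum) = 6, so TUCSS = 34 and
	 * TUCSO = 40; TUCSE = 0 means "checksum to the end of the
	 * packet".
	 */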
1955
1956 /* Fill in the context descriptor. */
1957 t = (struct livengood_tcpip_ctxdesc *)
1958 &sc->sc_txdescs[sc->sc_txnext];
1959 t->tcpip_ipcs = htole32(ipcs);
1960 t->tcpip_tucs = htole32(tucs);
1961 t->tcpip_cmdlen = htole32(cmdlen);
1962 t->tcpip_seg = htole32(seg);
1963 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1964
1965 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1966 txs->txs_ndesc++;
1967
1968 *cmdp = cmd;
1969 *fieldsp = fields;
1970
1971 return (0);
1972 }
1973
1974 static void
1975 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1976 {
1977 struct mbuf *m;
1978 int i;
1979
1980 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1981 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1982 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1983 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1984 m->m_data, m->m_len, m->m_flags);
1985 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1986 i, i == 1 ? "" : "s");
1987 }
1988
1989 /*
1990 * wm_82547_txfifo_stall:
1991 *
1992 * Callout used to wait for the 82547 Tx FIFO to drain,
1993 * reset the FIFO pointers, and restart packet transmission.
1994 */
1995 static void
1996 wm_82547_txfifo_stall(void *arg)
1997 {
1998 struct wm_softc *sc = arg;
1999 int s;
2000
2001 s = splnet();
2002
2003 if (sc->sc_txfifo_stall) {
2004 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2005 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2006 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2007 /*
2008 * Packets have drained. Stop transmitter, reset
2009 * FIFO pointers, restart transmitter, and kick
2010 * the packet queue.
2011 */
2012 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2013 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2014 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2015 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2016 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2017 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2018 CSR_WRITE(sc, WMREG_TCTL, tctl);
2019 CSR_WRITE_FLUSH(sc);
2020
2021 sc->sc_txfifo_head = 0;
2022 sc->sc_txfifo_stall = 0;
2023 wm_start(&sc->sc_ethercom.ec_if);
2024 } else {
2025 /*
2026 * Still waiting for packets to drain; try again in
2027 * another tick.
2028 */
2029 callout_schedule(&sc->sc_txfifo_ch, 1);
2030 }
2031 }
2032
2033 splx(s);
2034 }
2035
2036 /*
2037 * wm_82547_txfifo_bugchk:
2038 *
2039 * Check for bug condition in the 82547 Tx FIFO. We need to
2040 * prevent enqueueing a packet that would wrap around the end
2041  * of the Tx FIFO ring buffer; otherwise the chip will croak.
2042 *
2043 * We do this by checking the amount of space before the end
2044 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2045 * the Tx FIFO, wait for all remaining packets to drain, reset
2046 * the internal FIFO pointers to the beginning, and restart
2047 * transmission on the interface.
2048 */
2049 #define WM_FIFO_HDR 0x10
2050 #define WM_82547_PAD_LEN 0x3e0
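/*
 * Worked example (illustrative): a 1514-byte frame rounds up to
 * len = roundup(1514 + 0x10, 0x10) = 1536 bytes of FIFO space, so
 * with WM_82547_PAD_LEN = 0x3e0 (992) the check below stalls the
 * FIFO whenever no more than 1536 - 992 = 544 bytes remain before
 * the end of the ring.
 */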
2051 static int
2052 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2053 {
2054 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2055 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2056
2057 /* Just return if already stalled. */
2058 if (sc->sc_txfifo_stall)
2059 return (1);
2060
2061 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2062 /* Stall only occurs in half-duplex mode. */
2063 goto send_packet;
2064 }
2065
2066 if (len >= WM_82547_PAD_LEN + space) {
2067 sc->sc_txfifo_stall = 1;
2068 callout_schedule(&sc->sc_txfifo_ch, 1);
2069 return (1);
2070 }
2071
2072 send_packet:
2073 sc->sc_txfifo_head += len;
2074 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2075 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2076
2077 return (0);
2078 }
2079
2080 /*
2081 * wm_start: [ifnet interface function]
2082 *
2083 * Start packet transmission on the interface.
2084 */
2085 static void
2086 wm_start(struct ifnet *ifp)
2087 {
2088 struct wm_softc *sc = ifp->if_softc;
2089 struct mbuf *m0;
2090 struct m_tag *mtag;
2091 struct wm_txsoft *txs;
2092 bus_dmamap_t dmamap;
2093 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2094 bus_addr_t curaddr;
2095 bus_size_t seglen, curlen;
2096 uint32_t cksumcmd;
2097 uint8_t cksumfields;
2098
2099 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2100 return;
2101
2102 /*
2103 * Remember the previous number of free descriptors.
2104 */
2105 ofree = sc->sc_txfree;
2106
2107 /*
2108 * Loop through the send queue, setting up transmit descriptors
2109 * until we drain the queue, or use up all available transmit
2110 * descriptors.
2111 */
2112 for (;;) {
2113 /* Grab a packet off the queue. */
2114 IFQ_POLL(&ifp->if_snd, m0);
2115 if (m0 == NULL)
2116 break;
2117
2118 DPRINTF(WM_DEBUG_TX,
2119 ("%s: TX: have packet to transmit: %p\n",
2120 device_xname(sc->sc_dev), m0));
2121
2122 /* Get a work queue entry. */
2123 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2124 wm_txintr(sc);
2125 if (sc->sc_txsfree == 0) {
2126 DPRINTF(WM_DEBUG_TX,
2127 ("%s: TX: no free job descriptors\n",
2128 device_xname(sc->sc_dev)));
2129 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2130 break;
2131 }
2132 }
2133
2134 txs = &sc->sc_txsoft[sc->sc_txsnext];
2135 dmamap = txs->txs_dmamap;
2136
2137 use_tso = (m0->m_pkthdr.csum_flags &
2138 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2139
2140 /*
2141 * So says the Linux driver:
2142 * The controller does a simple calculation to make sure
2143 * there is enough room in the FIFO before initiating the
2144 * DMA for each buffer. The calc is:
2145 * 4 = ceil(buffer len / MSS)
2146 * To make sure we don't overrun the FIFO, adjust the max
2147 * buffer len if the MSS drops.
2148 */
2149 dmamap->dm_maxsegsz =
2150 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2151 ? m0->m_pkthdr.segsz << 2
2152 : WTX_MAX_LEN;
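		/*
		 * Worked example (illustrative): with a typical MSS of
		 * 1460, segsz << 2 = 5840, so each DMA segment is capped
		 * at 5840 bytes provided that is below WTX_MAX_LEN;
		 * smaller MSS values shrink the cap proportionally.
		 */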
2153
2154 /*
2155 * Load the DMA map. If this fails, the packet either
2156 * didn't fit in the allotted number of segments, or we
2157 * were short on resources. For the too-many-segments
2158 * case, we simply report an error and drop the packet,
2159 * since we can't sanely copy a jumbo packet to a single
2160 * buffer.
2161 */
2162 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2163 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2164 if (error) {
2165 if (error == EFBIG) {
2166 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2167 log(LOG_ERR, "%s: Tx packet consumes too many "
2168 "DMA segments, dropping...\n",
2169 device_xname(sc->sc_dev));
2170 IFQ_DEQUEUE(&ifp->if_snd, m0);
2171 wm_dump_mbuf_chain(sc, m0);
2172 m_freem(m0);
2173 continue;
2174 }
2175 /*
2176 * Short on resources, just stop for now.
2177 */
2178 DPRINTF(WM_DEBUG_TX,
2179 ("%s: TX: dmamap load failed: %d\n",
2180 device_xname(sc->sc_dev), error));
2181 break;
2182 }
2183
2184 segs_needed = dmamap->dm_nsegs;
2185 if (use_tso) {
2186 /* For sentinel descriptor; see below. */
2187 segs_needed++;
2188 }
2189
2190 /*
2191 * Ensure we have enough descriptors free to describe
2192 * the packet. Note, we always reserve one descriptor
2193 * at the end of the ring due to the semantics of the
2194 * TDT register, plus one more in the event we need
2195 * to load offload context.
2196 */
2197 if (segs_needed > sc->sc_txfree - 2) {
2198 /*
2199 * Not enough free descriptors to transmit this
2200 * packet. We haven't committed anything yet,
2201 * so just unload the DMA map, put the packet
2202 			 * back on the queue, and punt. Notify the upper
2203 * layer that there are no more slots left.
2204 */
2205 DPRINTF(WM_DEBUG_TX,
2206 ("%s: TX: need %d (%d) descriptors, have %d\n",
2207 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2208 segs_needed, sc->sc_txfree - 1));
2209 ifp->if_flags |= IFF_OACTIVE;
2210 bus_dmamap_unload(sc->sc_dmat, dmamap);
2211 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2212 break;
2213 }
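		/*
		 * Worked example (illustrative): a TSO packet mapped to
		 * 8 DMA segments needs segs_needed = 9 (8 + sentinel);
		 * with sc_txfree = 10 the test 9 > 10 - 2 fires, since
		 * the two reserved slots (TDT semantics plus a possible
		 * context descriptor) would otherwise be consumed.
		 */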
2214
2215 /*
2216 * Check for 82547 Tx FIFO bug. We need to do this
2217 * once we know we can transmit the packet, since we
2218 * do some internal FIFO space accounting here.
2219 */
2220 if (sc->sc_type == WM_T_82547 &&
2221 wm_82547_txfifo_bugchk(sc, m0)) {
2222 DPRINTF(WM_DEBUG_TX,
2223 ("%s: TX: 82547 Tx FIFO bug detected\n",
2224 device_xname(sc->sc_dev)));
2225 ifp->if_flags |= IFF_OACTIVE;
2226 bus_dmamap_unload(sc->sc_dmat, dmamap);
2227 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2228 break;
2229 }
2230
2231 IFQ_DEQUEUE(&ifp->if_snd, m0);
2232
2233 /*
2234 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2235 */
2236
2237 DPRINTF(WM_DEBUG_TX,
2238 ("%s: TX: packet has %d (%d) DMA segments\n",
2239 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2240
2241 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2242
2243 /*
2244 * Store a pointer to the packet so that we can free it
2245 * later.
2246 *
2247 * Initially, we consider the number of descriptors the
2248 		 * packet uses to be the number of DMA segments. This may be
2249 * incremented by 1 if we do checksum offload (a descriptor
2250 * is used to set the checksum context).
2251 */
2252 txs->txs_mbuf = m0;
2253 txs->txs_firstdesc = sc->sc_txnext;
2254 txs->txs_ndesc = segs_needed;
2255
2256 /* Set up offload parameters for this packet. */
2257 if (m0->m_pkthdr.csum_flags &
2258 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2259 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2260 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2261 if (wm_tx_offload(sc, txs, &cksumcmd,
2262 &cksumfields) != 0) {
2263 /* Error message already displayed. */
2264 bus_dmamap_unload(sc->sc_dmat, dmamap);
2265 continue;
2266 }
2267 } else {
2268 cksumcmd = 0;
2269 cksumfields = 0;
2270 }
2271
2272 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2273
2274 /* Sync the DMA map. */
2275 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2276 BUS_DMASYNC_PREWRITE);
2277
2278 /*
2279 * Initialize the transmit descriptor.
2280 */
2281 for (nexttx = sc->sc_txnext, seg = 0;
2282 seg < dmamap->dm_nsegs; seg++) {
2283 for (seglen = dmamap->dm_segs[seg].ds_len,
2284 curaddr = dmamap->dm_segs[seg].ds_addr;
2285 seglen != 0;
2286 curaddr += curlen, seglen -= curlen,
2287 nexttx = WM_NEXTTX(sc, nexttx)) {
2288 curlen = seglen;
2289
2290 /*
2291 * So says the Linux driver:
2292 * Work around for premature descriptor
2293 * write-backs in TSO mode. Append a
2294 * 4-byte sentinel descriptor.
2295 */
2296 if (use_tso &&
2297 seg == dmamap->dm_nsegs - 1 &&
2298 curlen > 8)
2299 curlen -= 4;
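				/*
				 * The 4 bytes shaved off here are not
				 * lost: seglen is still decremented by
				 * curlen, so the next loop iteration
				 * emits them as their own 4-byte
				 * descriptor -- the sentinel that
				 * segs_needed reserved space for above.
				 */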
2300
2301 wm_set_dma_addr(
2302 &sc->sc_txdescs[nexttx].wtx_addr,
2303 curaddr);
2304 sc->sc_txdescs[nexttx].wtx_cmdlen =
2305 htole32(cksumcmd | curlen);
2306 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2307 0;
2308 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2309 cksumfields;
2310 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2311 lasttx = nexttx;
2312
2313 DPRINTF(WM_DEBUG_TX,
2314 ("%s: TX: desc %d: low 0x%08lx, "
2315 "len 0x%04x\n",
2316 device_xname(sc->sc_dev), nexttx,
2317 curaddr & 0xffffffffUL, (unsigned)curlen));
2318 }
2319 }
2320
2321 KASSERT(lasttx != -1);
2322
2323 /*
2324 * Set up the command byte on the last descriptor of
2325 * the packet. If we're in the interrupt delay window,
2326 * delay the interrupt.
2327 */
2328 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2329 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2330
2331 /*
2332 * If VLANs are enabled and the packet has a VLAN tag, set
2333 * up the descriptor to encapsulate the packet for us.
2334 *
2335 * This is only valid on the last descriptor of the packet.
2336 */
2337 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2338 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2339 htole32(WTX_CMD_VLE);
2340 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2341 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2342 }
2343
2344 txs->txs_lastdesc = lasttx;
2345
2346 DPRINTF(WM_DEBUG_TX,
2347 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2348 device_xname(sc->sc_dev),
2349 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2350
2351 /* Sync the descriptors we're using. */
2352 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2353 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2354
2355 /* Give the packet to the chip. */
2356 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2357
2358 DPRINTF(WM_DEBUG_TX,
2359 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2360
2361 DPRINTF(WM_DEBUG_TX,
2362 ("%s: TX: finished transmitting packet, job %d\n",
2363 device_xname(sc->sc_dev), sc->sc_txsnext));
2364
2365 /* Advance the tx pointer. */
2366 sc->sc_txfree -= txs->txs_ndesc;
2367 sc->sc_txnext = nexttx;
2368
2369 sc->sc_txsfree--;
2370 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2371
2372 #if NBPFILTER > 0
2373 /* Pass the packet to any BPF listeners. */
2374 if (ifp->if_bpf)
2375 bpf_mtap(ifp->if_bpf, m0);
2376 #endif /* NBPFILTER > 0 */
2377 }
2378
2379 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2380 /* No more slots; notify upper layer. */
2381 ifp->if_flags |= IFF_OACTIVE;
2382 }
2383
2384 if (sc->sc_txfree != ofree) {
2385 /* Set a watchdog timer in case the chip flakes out. */
2386 ifp->if_timer = 5;
2387 }
2388 }
2389
2390 /*
2391 * wm_watchdog: [ifnet interface function]
2392 *
2393 * Watchdog timer handler.
2394 */
2395 static void
2396 wm_watchdog(struct ifnet *ifp)
2397 {
2398 struct wm_softc *sc = ifp->if_softc;
2399
2400 /*
2401 * Since we're using delayed interrupts, sweep up
2402 * before we report an error.
2403 */
2404 wm_txintr(sc);
2405
2406 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2407 log(LOG_ERR,
2408 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2409 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2410 sc->sc_txnext);
2411 ifp->if_oerrors++;
2412
2413 /* Reset the interface. */
2414 (void) wm_init(ifp);
2415 }
2416
2417 /* Try to get more packets going. */
2418 wm_start(ifp);
2419 }
2420
2421 /*
2422 * wm_ioctl: [ifnet interface function]
2423 *
2424 * Handle control requests from the operator.
2425 */
2426 static int
2427 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2428 {
2429 struct wm_softc *sc = ifp->if_softc;
2430 struct ifreq *ifr = (struct ifreq *) data;
2431 struct ifaddr *ifa = (struct ifaddr *)data;
2432 struct sockaddr_dl *sdl;
2433 int diff, s, error;
2434
2435 s = splnet();
2436
2437 switch (cmd) {
2438 case SIOCSIFFLAGS:
2439 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2440 break;
2441 if (ifp->if_flags & IFF_UP) {
2442 diff = (ifp->if_flags ^ sc->sc_if_flags)
2443 & (IFF_PROMISC | IFF_ALLMULTI);
2444 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2445 /*
2446 				 * If the only difference between the old and
2447 				 * new flags is IFF_PROMISC or
2448 * IFF_ALLMULTI, set multicast filter only
2449 * (don't reset to prevent link down).
2450 */
2451 wm_set_filter(sc);
2452 } else {
2453 /*
2454 * Reset the interface to pick up changes in
2455 * any other flags that affect the hardware
2456 * state.
2457 */
2458 wm_init(ifp);
2459 }
2460 } else {
2461 if (ifp->if_flags & IFF_RUNNING)
2462 wm_stop(ifp, 1);
2463 }
2464 sc->sc_if_flags = ifp->if_flags;
2465 error = 0;
2466 break;
2467 case SIOCSIFMEDIA:
2468 case SIOCGIFMEDIA:
2469 /* Flow control requires full-duplex mode. */
2470 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2471 (ifr->ifr_media & IFM_FDX) == 0)
2472 ifr->ifr_media &= ~IFM_ETH_FMASK;
2473 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2474 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2475 /* We can do both TXPAUSE and RXPAUSE. */
2476 ifr->ifr_media |=
2477 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2478 }
2479 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2480 }
2481 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2482 break;
2483 case SIOCINITIFADDR:
2484 if (ifa->ifa_addr->sa_family == AF_LINK) {
2485 sdl = satosdl(ifp->if_dl->ifa_addr);
2486 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2487 LLADDR(satosdl(ifa->ifa_addr)),
2488 ifp->if_addrlen);
2489 /* unicast address is first multicast entry */
2490 wm_set_filter(sc);
2491 error = 0;
2492 break;
2493 }
2494 /* Fall through for rest */
2495 default:
2496 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2497 break;
2498
2499 error = 0;
2500
2501 if (cmd == SIOCSIFCAP)
2502 error = (*ifp->if_init)(ifp);
2503 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2504 ;
2505 else if (ifp->if_flags & IFF_RUNNING) {
2506 /*
2507 * Multicast list has changed; set the hardware filter
2508 * accordingly.
2509 */
2510 wm_set_filter(sc);
2511 }
2512 break;
2513 }
2514
2515 /* Try to get more packets going. */
2516 wm_start(ifp);
2517
2518 splx(s);
2519 return (error);
2520 }
2521
2522 /*
2523 * wm_intr:
2524 *
2525 * Interrupt service routine.
2526 */
2527 static int
2528 wm_intr(void *arg)
2529 {
2530 struct wm_softc *sc = arg;
2531 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2532 uint32_t icr;
2533 int handled = 0;
2534
2535 while (1 /* CONSTCOND */) {
2536 icr = CSR_READ(sc, WMREG_ICR);
2537 if ((icr & sc->sc_icr) == 0)
2538 break;
2539 #if 0 /*NRND > 0*/
2540 if (RND_ENABLED(&sc->rnd_source))
2541 rnd_add_uint32(&sc->rnd_source, icr);
2542 #endif
2543
2544 handled = 1;
2545
2546 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2547 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2548 DPRINTF(WM_DEBUG_RX,
2549 ("%s: RX: got Rx intr 0x%08x\n",
2550 device_xname(sc->sc_dev),
2551 icr & (ICR_RXDMT0|ICR_RXT0)));
2552 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2553 }
2554 #endif
2555 wm_rxintr(sc);
2556
2557 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2558 if (icr & ICR_TXDW) {
2559 DPRINTF(WM_DEBUG_TX,
2560 ("%s: TX: got TXDW interrupt\n",
2561 device_xname(sc->sc_dev)));
2562 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2563 }
2564 #endif
2565 wm_txintr(sc);
2566
2567 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2568 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2569 wm_linkintr(sc, icr);
2570 }
2571
2572 if (icr & ICR_RXO) {
2573 ifp->if_ierrors++;
2574 #if defined(WM_DEBUG)
2575 log(LOG_WARNING, "%s: Receive overrun\n",
2576 device_xname(sc->sc_dev));
2577 #endif /* defined(WM_DEBUG) */
2578 }
2579 }
2580
2581 if (handled) {
2582 /* Try to get more packets going. */
2583 wm_start(ifp);
2584 }
2585
2586 return (handled);
2587 }
2588
2589 /*
2590 * wm_txintr:
2591 *
2592 * Helper; handle transmit interrupts.
2593 */
2594 static void
2595 wm_txintr(struct wm_softc *sc)
2596 {
2597 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2598 struct wm_txsoft *txs;
2599 uint8_t status;
2600 int i;
2601
2602 ifp->if_flags &= ~IFF_OACTIVE;
2603
2604 /*
2605 * Go through the Tx list and free mbufs for those
2606 * frames which have been transmitted.
2607 */
2608 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2609 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2610 txs = &sc->sc_txsoft[i];
2611
2612 DPRINTF(WM_DEBUG_TX,
2613 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2614
2615 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2616 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2617
2618 status =
2619 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2620 if ((status & WTX_ST_DD) == 0) {
2621 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2622 BUS_DMASYNC_PREREAD);
2623 break;
2624 }
2625
2626 DPRINTF(WM_DEBUG_TX,
2627 ("%s: TX: job %d done: descs %d..%d\n",
2628 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2629 txs->txs_lastdesc));
2630
2631 /*
2632 * XXX We should probably be using the statistics
2633 * XXX registers, but I don't know if they exist
2634 * XXX on chips before the i82544.
2635 */
2636
2637 #ifdef WM_EVENT_COUNTERS
2638 if (status & WTX_ST_TU)
2639 WM_EVCNT_INCR(&sc->sc_ev_tu);
2640 #endif /* WM_EVENT_COUNTERS */
2641
2642 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2643 ifp->if_oerrors++;
2644 if (status & WTX_ST_LC)
2645 log(LOG_WARNING, "%s: late collision\n",
2646 device_xname(sc->sc_dev));
2647 else if (status & WTX_ST_EC) {
2648 ifp->if_collisions += 16;
2649 log(LOG_WARNING, "%s: excessive collisions\n",
2650 device_xname(sc->sc_dev));
2651 }
2652 } else
2653 ifp->if_opackets++;
2654
2655 sc->sc_txfree += txs->txs_ndesc;
2656 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2657 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2658 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2659 m_freem(txs->txs_mbuf);
2660 txs->txs_mbuf = NULL;
2661 }
2662
2663 /* Update the dirty transmit buffer pointer. */
2664 sc->sc_txsdirty = i;
2665 DPRINTF(WM_DEBUG_TX,
2666 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2667
2668 /*
2669 * If there are no more pending transmissions, cancel the watchdog
2670 * timer.
2671 */
2672 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2673 ifp->if_timer = 0;
2674 }
2675
2676 /*
2677 * wm_rxintr:
2678 *
2679 * Helper; handle receive interrupts.
2680 */
2681 static void
2682 wm_rxintr(struct wm_softc *sc)
2683 {
2684 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2685 struct wm_rxsoft *rxs;
2686 struct mbuf *m;
2687 int i, len;
2688 uint8_t status, errors;
2689 uint16_t vlantag;
2690
2691 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2692 rxs = &sc->sc_rxsoft[i];
2693
2694 DPRINTF(WM_DEBUG_RX,
2695 ("%s: RX: checking descriptor %d\n",
2696 device_xname(sc->sc_dev), i));
2697
2698 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2699
2700 status = sc->sc_rxdescs[i].wrx_status;
2701 errors = sc->sc_rxdescs[i].wrx_errors;
2702 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2703 vlantag = sc->sc_rxdescs[i].wrx_special;
2704
2705 if ((status & WRX_ST_DD) == 0) {
2706 /*
2707 * We have processed all of the receive descriptors.
2708 */
2709 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2710 break;
2711 }
2712
2713 if (__predict_false(sc->sc_rxdiscard)) {
2714 DPRINTF(WM_DEBUG_RX,
2715 ("%s: RX: discarding contents of descriptor %d\n",
2716 device_xname(sc->sc_dev), i));
2717 WM_INIT_RXDESC(sc, i);
2718 if (status & WRX_ST_EOP) {
2719 /* Reset our state. */
2720 DPRINTF(WM_DEBUG_RX,
2721 ("%s: RX: resetting rxdiscard -> 0\n",
2722 device_xname(sc->sc_dev)));
2723 sc->sc_rxdiscard = 0;
2724 }
2725 continue;
2726 }
2727
2728 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2729 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2730
2731 m = rxs->rxs_mbuf;
2732
2733 /*
2734 * Add a new receive buffer to the ring, unless of
2735 * course the length is zero. Treat the latter as a
2736 * failed mapping.
2737 */
2738 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2739 /*
2740 * Failed, throw away what we've done so
2741 * far, and discard the rest of the packet.
2742 */
2743 ifp->if_ierrors++;
2744 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2745 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2746 WM_INIT_RXDESC(sc, i);
2747 if ((status & WRX_ST_EOP) == 0)
2748 sc->sc_rxdiscard = 1;
2749 if (sc->sc_rxhead != NULL)
2750 m_freem(sc->sc_rxhead);
2751 WM_RXCHAIN_RESET(sc);
2752 DPRINTF(WM_DEBUG_RX,
2753 ("%s: RX: Rx buffer allocation failed, "
2754 "dropping packet%s\n", device_xname(sc->sc_dev),
2755 sc->sc_rxdiscard ? " (discard)" : ""));
2756 continue;
2757 }
2758
2759 m->m_len = len;
2760 sc->sc_rxlen += len;
2761 DPRINTF(WM_DEBUG_RX,
2762 ("%s: RX: buffer at %p len %d\n",
2763 device_xname(sc->sc_dev), m->m_data, len));
2764
2765 /*
2766 * If this is not the end of the packet, keep
2767 * looking.
2768 */
2769 if ((status & WRX_ST_EOP) == 0) {
2770 WM_RXCHAIN_LINK(sc, m);
2771 DPRINTF(WM_DEBUG_RX,
2772 ("%s: RX: not yet EOP, rxlen -> %d\n",
2773 device_xname(sc->sc_dev), sc->sc_rxlen));
2774 continue;
2775 }
2776
2777 /*
2778 * Okay, we have the entire packet now. The chip is
2779 * configured to include the FCS (not all chips can
2780 * be configured to strip it), so we need to trim it.
2781 * May need to adjust length of previous mbuf in the
2782 * chain if the current mbuf is too short.
2783 */
2784 if (m->m_len < ETHER_CRC_LEN) {
2785 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2786 m->m_len = 0;
2787 } else {
2788 m->m_len -= ETHER_CRC_LEN;
2789 }
2790 len = sc->sc_rxlen - ETHER_CRC_LEN;
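		/*
		 * Worked example (illustrative): a maximum-size frame
		 * arrives as sc_rxlen = 1518 bytes including the FCS;
		 * after trimming, len = 1514. If the final mbuf held only
		 * 2 of those bytes, the previous mbuf gives up the other
		 * ETHER_CRC_LEN - 2 = 2 bytes and the final mbuf's length
		 * is zeroed.
		 */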
2791
2792 WM_RXCHAIN_LINK(sc, m);
2793
2794 *sc->sc_rxtailp = NULL;
2795 m = sc->sc_rxhead;
2796
2797 WM_RXCHAIN_RESET(sc);
2798
2799 DPRINTF(WM_DEBUG_RX,
2800 ("%s: RX: have entire packet, len -> %d\n",
2801 device_xname(sc->sc_dev), len));
2802
2803 /*
2804 * If an error occurred, update stats and drop the packet.
2805 */
2806 if (errors &
2807 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2808 ifp->if_ierrors++;
2809 if (errors & WRX_ER_SE)
2810 log(LOG_WARNING, "%s: symbol error\n",
2811 device_xname(sc->sc_dev));
2812 else if (errors & WRX_ER_SEQ)
2813 log(LOG_WARNING, "%s: receive sequence error\n",
2814 device_xname(sc->sc_dev));
2815 else if (errors & WRX_ER_CE)
2816 log(LOG_WARNING, "%s: CRC error\n",
2817 device_xname(sc->sc_dev));
2818 m_freem(m);
2819 continue;
2820 }
2821
2822 /*
2823 * No errors. Receive the packet.
2824 */
2825 m->m_pkthdr.rcvif = ifp;
2826 m->m_pkthdr.len = len;
2827
2828 /*
2829 * If VLANs are enabled, VLAN packets have been unwrapped
2830 * for us. Associate the tag with the packet.
2831 */
2832 if ((status & WRX_ST_VP) != 0) {
2833 VLAN_INPUT_TAG(ifp, m,
2834 le16toh(vlantag),
2835 continue);
2836 }
2837
2838 /*
2839 * Set up checksum info for this packet.
2840 */
2841 if ((status & WRX_ST_IXSM) == 0) {
2842 if (status & WRX_ST_IPCS) {
2843 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2844 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2845 if (errors & WRX_ER_IPE)
2846 m->m_pkthdr.csum_flags |=
2847 M_CSUM_IPv4_BAD;
2848 }
2849 if (status & WRX_ST_TCPCS) {
2850 /*
2851 * Note: we don't know if this was TCP or UDP,
2852 * so we just set both bits, and expect the
2853 * upper layers to deal.
2854 */
2855 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2856 m->m_pkthdr.csum_flags |=
2857 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2858 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2859 if (errors & WRX_ER_TCPE)
2860 m->m_pkthdr.csum_flags |=
2861 M_CSUM_TCP_UDP_BAD;
2862 }
2863 }
2864
2865 ifp->if_ipackets++;
2866
2867 #if NBPFILTER > 0
2868 /* Pass this up to any BPF listeners. */
2869 if (ifp->if_bpf)
2870 bpf_mtap(ifp->if_bpf, m);
2871 #endif /* NBPFILTER > 0 */
2872
2873 /* Pass it on. */
2874 (*ifp->if_input)(ifp, m);
2875 }
2876
2877 /* Update the receive pointer. */
2878 sc->sc_rxptr = i;
2879
2880 DPRINTF(WM_DEBUG_RX,
2881 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2882 }
2883
2884 /*
2885 * wm_linkintr:
2886 *
2887 * Helper; handle link interrupts.
2888 */
2889 static void
2890 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2891 {
2892 uint32_t status;
2893
2894 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2895 __func__));
2896 /*
2897 * If we get a link status interrupt on a 1000BASE-T
2898 * device, just fall into the normal MII tick path.
2899 */
2900 if (sc->sc_flags & WM_F_HAS_MII) {
2901 if (icr & ICR_LSC) {
2902 DPRINTF(WM_DEBUG_LINK,
2903 ("%s: LINK: LSC -> mii_tick\n",
2904 device_xname(sc->sc_dev)));
2905 mii_tick(&sc->sc_mii);
2906 if (sc->sc_type == WM_T_82543) {
2907 int miistatus, active;
2908
2909 /*
2910 				 * With the 82543, we need to force the
2911 				 * MAC's speed and duplex to match the
2912 				 * PHY's speed and duplex configuration.
2913 */
2914 miistatus = sc->sc_mii.mii_media_status;
2915
2916 if (miistatus & IFM_ACTIVE) {
2917 active = sc->sc_mii.mii_media_active;
2918 sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2919 | CTRL_FD);
2920 switch (IFM_SUBTYPE(active)) {
2921 case IFM_10_T:
2922 sc->sc_ctrl |= CTRL_SPEED_10;
2923 break;
2924 case IFM_100_TX:
2925 sc->sc_ctrl |= CTRL_SPEED_100;
2926 break;
2927 case IFM_1000_T:
2928 sc->sc_ctrl |= CTRL_SPEED_1000;
2929 break;
2930 default:
2931 /*
2932 * fiber?
2933 					 * Should not get here.
2934 */
2935 printf("unknown media (%x)\n",
2936 active);
2937 break;
2938 }
2939 if (active & IFM_FDX)
2940 sc->sc_ctrl |= CTRL_FD;
2941 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2942 }
2943 }
2944 } else if (icr & ICR_RXSEQ) {
2945 DPRINTF(WM_DEBUG_LINK,
2946 ("%s: LINK Receive sequence error\n",
2947 device_xname(sc->sc_dev)));
2948 }
2949 return;
2950 }
2951
2952 /* TBI mode */
2953 status = CSR_READ(sc, WMREG_STATUS);
2954 if (icr & ICR_LSC) {
2955 if (status & STATUS_LU) {
2956 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2957 device_xname(sc->sc_dev),
2958 (status & STATUS_FD) ? "FDX" : "HDX"));
2959 /*
2960 			 * NOTE: TFCE and RFCE in CTRL are updated
2961 			 * automatically, so refresh sc->sc_ctrl from the register
2962 */
2963
2964 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2965 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2966 sc->sc_fcrtl &= ~FCRTL_XONE;
2967 if (status & STATUS_FD)
2968 sc->sc_tctl |=
2969 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2970 else
2971 sc->sc_tctl |=
2972 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2973 if (sc->sc_ctrl & CTRL_TFCE)
2974 sc->sc_fcrtl |= FCRTL_XONE;
2975 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2976 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2977 WMREG_OLD_FCRTL : WMREG_FCRTL,
2978 sc->sc_fcrtl);
2979 sc->sc_tbi_linkup = 1;
2980 } else {
2981 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2982 device_xname(sc->sc_dev)));
2983 sc->sc_tbi_linkup = 0;
2984 }
2985 wm_tbi_set_linkled(sc);
2986 } else if (icr & ICR_RXCFG) {
2987 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2988 device_xname(sc->sc_dev)));
2989 sc->sc_tbi_nrxcfg++;
2990 wm_check_for_link(sc);
2991 } else if (icr & ICR_RXSEQ) {
2992 DPRINTF(WM_DEBUG_LINK,
2993 ("%s: LINK: Receive sequence error\n",
2994 device_xname(sc->sc_dev)));
2995 }
2996 }
2997
2998 /*
2999 * wm_tick:
3000 *
3001 * One second timer, used to check link status, sweep up
3002 * completed transmit jobs, etc.
3003 */
3004 static void
3005 wm_tick(void *arg)
3006 {
3007 struct wm_softc *sc = arg;
3008 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3009 int s;
3010
3011 s = splnet();
3012
3013 if (sc->sc_type >= WM_T_82542_2_1) {
3014 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3015 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3016 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3017 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3018 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3019 }
3020
3021 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3022 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3023
3024 if (sc->sc_flags & WM_F_HAS_MII)
3025 mii_tick(&sc->sc_mii);
3026 else
3027 wm_tbi_check_link(sc);
3028
3029 splx(s);
3030
3031 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3032 }
3033
3034 /*
3035 * wm_reset:
3036 *
3037 * Reset the i82542 chip.
3038 */
3039 static void
3040 wm_reset(struct wm_softc *sc)
3041 {
3042 int phy_reset = 0;
3043 uint32_t reg, func, mask;
3044 int i;
3045
3046 /*
3047 * Allocate on-chip memory according to the MTU size.
3048 * The Packet Buffer Allocation register must be written
3049 * before the chip is reset.
3050 */
3051 switch (sc->sc_type) {
3052 case WM_T_82547:
3053 case WM_T_82547_2:
3054 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3055 PBA_22K : PBA_30K;
3056 sc->sc_txfifo_head = 0;
3057 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3058 sc->sc_txfifo_size =
3059 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3060 sc->sc_txfifo_stall = 0;
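		/*
		 * Illustrative sizing, assuming the PBA_* constants count
		 * 1KB blocks: with a standard MTU, sc_pba = PBA_30K, so
		 * the Rx side gets 30KB of packet buffer and the Tx FIFO
		 * gets the remaining (40 - 30)KB = 10KB starting right
		 * after it.
		 */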
3061 break;
3062 case WM_T_82571:
3063 case WM_T_82572:
3064 case WM_T_80003:
3065 sc->sc_pba = PBA_32K;
3066 break;
3067 case WM_T_82573:
3068 sc->sc_pba = PBA_12K;
3069 break;
3070 case WM_T_82574:
3071 case WM_T_82583:
3072 sc->sc_pba = PBA_20K;
3073 break;
3074 case WM_T_ICH8:
3075 sc->sc_pba = PBA_8K;
3076 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3077 break;
3078 case WM_T_ICH9:
3079 case WM_T_ICH10:
3080 case WM_T_PCH:
3081 sc->sc_pba = PBA_10K;
3082 break;
3083 default:
3084 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3085 PBA_40K : PBA_48K;
3086 break;
3087 }
3088 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3089
3090 if (sc->sc_flags & WM_F_PCIE) {
3091 int timeout = 800;
3092
3093 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3094 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3095
3096 while (timeout--) {
3097 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3098 break;
3099 delay(100);
3100 }
3101 }
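	/*
	 * The loop above polls at 100us intervals, so the GIO master
	 * disable handshake is given at most 800 * 100us = 80ms to
	 * complete before we proceed with the reset anyway.
	 */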
3102
3103 /* clear interrupt */
3104 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3105
3106 /* Stop the transmit and receive processes. */
3107 CSR_WRITE(sc, WMREG_RCTL, 0);
3108 CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3109
3110 /* set_tbi_sbp_82543() */
3111
3112 delay(10*1000);
3113
3114 /* Must acquire the MDIO ownership before MAC reset */
3115 	switch (sc->sc_type) {
3116 case WM_T_82573:
3117 case WM_T_82574:
3118 case WM_T_82583:
3119 i = 0;
3120 reg = CSR_READ(sc, WMREG_EXTCNFCTR)
3121 | EXTCNFCTR_MDIO_SW_OWNERSHIP;
3122 do {
3123 CSR_WRITE(sc, WMREG_EXTCNFCTR,
3124 reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
3125 reg = CSR_READ(sc, WMREG_EXTCNFCTR);
3126 if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
3127 break;
3128 reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
3129 delay(2*1000);
3130 i++;
3131 } while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
3132 break;
3133 default:
3134 break;
3135 }
3136
3137 /*
3138 * 82541 Errata 29? & 82547 Errata 28?
3139 * See also the description about PHY_RST bit in CTRL register
3140 * in 8254x_GBe_SDM.pdf.
3141 */
3142 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3143 CSR_WRITE(sc, WMREG_CTRL,
3144 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3145 delay(5000);
3146 }
3147
3148 if (sc->sc_type == WM_T_PCH) {
3149 /* Save K1 */
3150 }
3151
3152 switch (sc->sc_type) {
3153 case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3154 case WM_T_82541:
3155 case WM_T_82541_2:
3156 case WM_T_82547:
3157 case WM_T_82547_2:
3158 /*
3159 * On some chipsets, a reset through a memory-mapped write
3160 * cycle can cause the chip to reset before completing the
3161 		 * write cycle. This causes a major headache that can be
3162 * avoided by issuing the reset via indirect register writes
3163 * through I/O space.
3164 *
3165 * So, if we successfully mapped the I/O BAR at attach time,
3166 * use that. Otherwise, try our luck with a memory-mapped
3167 * reset.
3168 */
3169 if (sc->sc_flags & WM_F_IOH_VALID)
3170 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3171 else
3172 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3173 break;
3174 case WM_T_82545_3:
3175 case WM_T_82546_3:
3176 /* Use the shadow control register on these chips. */
3177 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3178 break;
3179 case WM_T_80003:
3180 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3181 mask = func ? SWFW_PHY1_SM : SWFW_PHY0_SM;
3182 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3183 wm_get_swfw_semaphore(sc, mask);
3184 CSR_WRITE(sc, WMREG_CTRL, reg);
3185 wm_put_swfw_semaphore(sc, mask);
3186 break;
3187 case WM_T_ICH8:
3188 case WM_T_ICH9:
3189 case WM_T_ICH10:
3190 case WM_T_PCH:
3191 reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3192 if (wm_check_reset_block(sc) == 0) {
3193 if (sc->sc_type >= WM_T_PCH) {
3194 uint32_t status;
3195
3196 status = CSR_READ(sc, WMREG_STATUS);
3197 CSR_WRITE(sc, WMREG_STATUS,
3198 status & ~STATUS_PHYRA);
3199 }
3200
3201 reg |= CTRL_PHY_RESET;
3202 phy_reset = 1;
3203 }
3204 wm_get_swfwhw_semaphore(sc);
3205 CSR_WRITE(sc, WMREG_CTRL, reg);
3206 delay(20*1000);
3207 wm_put_swfwhw_semaphore(sc);
3208 break;
3209 case WM_T_82542_2_0:
3210 case WM_T_82542_2_1:
3211 case WM_T_82543:
3212 case WM_T_82540:
3213 case WM_T_82545:
3214 case WM_T_82546:
3215 case WM_T_82571:
3216 case WM_T_82572:
3217 case WM_T_82573:
3218 case WM_T_82574:
3219 case WM_T_82583:
3220 default:
3221 /* Everything else can safely use the documented method. */
3222 CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3223 break;
3224 }
3225
3226 if (phy_reset != 0)
3227 wm_get_cfg_done(sc);
3228
3229 /* reload EEPROM */
3230 	switch (sc->sc_type) {
3231 case WM_T_82542_2_0:
3232 case WM_T_82542_2_1:
3233 case WM_T_82543:
3234 case WM_T_82544:
3235 delay(10);
3236 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3237 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3238 delay(2000);
3239 break;
3240 case WM_T_82540:
3241 case WM_T_82545:
3242 case WM_T_82545_3:
3243 case WM_T_82546:
3244 case WM_T_82546_3:
3245 delay(5*1000);
3246 /* XXX Disable HW ARPs on ASF enabled adapters */
3247 break;
3248 case WM_T_82541:
3249 case WM_T_82541_2:
3250 case WM_T_82547:
3251 case WM_T_82547_2:
3252 delay(20000);
3253 /* XXX Disable HW ARPs on ASF enabled adapters */
3254 break;
3255 case WM_T_82571:
3256 case WM_T_82572:
3257 case WM_T_82573:
3258 case WM_T_82574:
3259 case WM_T_82583:
3260 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3261 delay(10);
3262 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3263 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3264 }
3265 /* check EECD_EE_AUTORD */
3266 wm_get_auto_rd_done(sc);
3267 /*
3268 		 * PHY configuration from the NVM starts only after
3269 		 * EECD_AUTO_RD is set.
3270 */
3271 if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3272 || (sc->sc_type == WM_T_82583))
3273 delay(25*1000);
3274 break;
3275 case WM_T_80003:
3276 case WM_T_ICH8:
3277 case WM_T_ICH9:
3278 /* check EECD_EE_AUTORD */
3279 wm_get_auto_rd_done(sc);
3280 break;
3281 case WM_T_ICH10:
3282 case WM_T_PCH:
3283 wm_lan_init_done(sc);
3284 break;
3285 default:
3286 panic("%s: unknown type\n", __func__);
3287 }
3288
3289 /* reload sc_ctrl */
3290 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3291
3292 /*
3293 * For PCH, this write will make sure that any noise will be detected
3294 * as a CRC error and be dropped rather than show up as a bad packet
3295 	 * to the DMA engine.
3296 */
3297 if (sc->sc_type == WM_T_PCH)
3298 CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3299
3300 #if 0
3301 for (i = 0; i < 1000; i++) {
3302 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3303 return;
3304 }
3305 delay(20);
3306 }
3307
3308 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3309 log(LOG_ERR, "%s: reset failed to complete\n",
3310 device_xname(sc->sc_dev));
3311 #endif
3312 }
3313
3314 /*
3315 * wm_init: [ifnet interface function]
3316 *
3317 * Initialize the interface. Must be called at splnet().
3318 */
3319 static int
3320 wm_init(struct ifnet *ifp)
3321 {
3322 struct wm_softc *sc = ifp->if_softc;
3323 struct wm_rxsoft *rxs;
3324 int i, error = 0;
3325 uint32_t reg;
3326
3327 /*
3328 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3329 	 * There is a small but measurable benefit to avoiding the adjustment
3330 	 * of the descriptor so that the headers are aligned, for normal MTU,
3331 * on such platforms. One possibility is that the DMA itself is
3332 * slightly more efficient if the front of the entire packet (instead
3333 * of the front of the headers) is aligned.
3334 *
3335 * Note we must always set align_tweak to 0 if we are using
3336 * jumbo frames.
3337 */
3338 #ifdef __NO_STRICT_ALIGNMENT
3339 sc->sc_align_tweak = 0;
3340 #else
3341 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3342 sc->sc_align_tweak = 0;
3343 else
3344 sc->sc_align_tweak = 2;
3345 #endif /* __NO_STRICT_ALIGNMENT */
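	/*
	 * Worked example (illustrative): with sc_align_tweak = 2, the
	 * 14-byte Ethernet header starts 2 bytes into the buffer, so the
	 * IP header that follows lands at offset 16 -- 4-byte aligned,
	 * which strict-alignment platforms require.
	 */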
3346
3347 /* Cancel any pending I/O. */
3348 wm_stop(ifp, 0);
3349
3350 /* update statistics before reset */
3351 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3352 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3353
3354 /* Reset the chip to a known state. */
3355 wm_reset(sc);
3356
3357 switch (sc->sc_type) {
3358 case WM_T_82571:
3359 case WM_T_82572:
3360 case WM_T_82573:
3361 case WM_T_82574:
3362 case WM_T_82583:
3363 case WM_T_80003:
3364 case WM_T_ICH8:
3365 case WM_T_ICH9:
3366 case WM_T_ICH10:
3367 case WM_T_PCH:
3368 if (wm_check_mng_mode(sc) != 0)
3369 wm_get_hw_control(sc);
3370 break;
3371 default:
3372 break;
3373 }
3374
3375 /* Initialize the transmit descriptor ring. */
3376 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3377 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3378 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3379 sc->sc_txfree = WM_NTXDESC(sc);
3380 sc->sc_txnext = 0;
3381
3382 if (sc->sc_type < WM_T_82543) {
3383 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3384 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3385 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3386 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3387 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3388 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3389 } else {
3390 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3391 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3392 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3393 CSR_WRITE(sc, WMREG_TDH, 0);
3394 CSR_WRITE(sc, WMREG_TDT, 0);
3395 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3396 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3397
3398 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3399 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3400 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3401 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3402 }
3403 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3404 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3405
3406 /* Initialize the transmit job descriptors. */
3407 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3408 sc->sc_txsoft[i].txs_mbuf = NULL;
3409 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3410 sc->sc_txsnext = 0;
3411 sc->sc_txsdirty = 0;
3412
3413 /*
3414 * Initialize the receive descriptor and receive job
3415 * descriptor rings.
3416 */
3417 if (sc->sc_type < WM_T_82543) {
3418 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3419 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3420 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3421 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3422 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3423 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3424
3425 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3426 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3427 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3428 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3429 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3430 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3431 } else {
3432 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3433 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3434 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3435 CSR_WRITE(sc, WMREG_RDH, 0);
3436 CSR_WRITE(sc, WMREG_RDT, 0);
3437 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3438 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3439 }
3440 for (i = 0; i < WM_NRXDESC; i++) {
3441 rxs = &sc->sc_rxsoft[i];
3442 if (rxs->rxs_mbuf == NULL) {
3443 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3444 log(LOG_ERR, "%s: unable to allocate or map rx "
3445 "buffer %d, error = %d\n",
3446 device_xname(sc->sc_dev), i, error);
3447 /*
3448 * XXX Should attempt to run with fewer receive
3449 * XXX buffers instead of just failing.
3450 */
3451 wm_rxdrain(sc);
3452 goto out;
3453 }
3454 } else
3455 WM_INIT_RXDESC(sc, i);
3456 }
3457 sc->sc_rxptr = 0;
3458 sc->sc_rxdiscard = 0;
3459 WM_RXCHAIN_RESET(sc);
3460
3461 /*
3462 * Clear out the VLAN table -- we don't use it (yet).
3463 */
3464 CSR_WRITE(sc, WMREG_VET, 0);
3465 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3466 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3467
3468 /*
3469 * Set up flow-control parameters.
3470 *
3471 * XXX Values could probably stand some tuning.
3472 */
3473 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3474 && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
3475 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3476 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3477 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3478 }
3479
3480 sc->sc_fcrtl = FCRTL_DFLT;
3481 if (sc->sc_type < WM_T_82543) {
3482 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3483 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3484 } else {
3485 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3486 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3487 }
3488
3489 if (sc->sc_type == WM_T_80003)
3490 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3491 else
3492 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3493
3494 /* Deal with VLAN enables. */
3495 if (VLAN_ATTACHED(&sc->sc_ethercom))
3496 sc->sc_ctrl |= CTRL_VME;
3497 else
3498 sc->sc_ctrl &= ~CTRL_VME;
3499
3500 /* Write the control registers. */
3501 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3502
3503 if (sc->sc_flags & WM_F_HAS_MII) {
3504 int val;
3505
3506 switch (sc->sc_type) {
3507 case WM_T_80003:
3508 case WM_T_ICH8:
3509 case WM_T_ICH9:
3510 case WM_T_ICH10:
3511 case WM_T_PCH:
3512 /*
3513 * Set the mac to wait the maximum time between each
3514 * iteration and increase the max iterations when
3515 * polling the phy; this fixes erroneous timeouts at
3516 * 10Mbps.
3517 */
3518 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3519 0xFFFF);
3520 val = wm_kmrn_readreg(sc,
3521 KUMCTRLSTA_OFFSET_INB_PARAM);
3522 val |= 0x3F;
3523 wm_kmrn_writereg(sc,
3524 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3525 break;
3526 default:
3527 break;
3528 }
3529
3530 if (sc->sc_type == WM_T_80003) {
3531 val = CSR_READ(sc, WMREG_CTRL_EXT);
3532 val &= ~CTRL_EXT_LINK_MODE_MASK;
3533 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3534
3535 /* Bypass RX and TX FIFO's */
3536 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3537 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3538 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3539
3540 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3541 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3542 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3543 }
3544 }
3545 #if 0
3546 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3547 #endif
3548
3549 /*
3550 * Set up checksum offload parameters.
3551 */
3552 reg = CSR_READ(sc, WMREG_RXCSUM);
3553 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3554 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3555 reg |= RXCSUM_IPOFL;
3556 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3557 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3558 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3559 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3560 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3561
3562 /* Reset TBI's RXCFG count */
3563 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3564
3565 /*
3566 * Set up the interrupt registers.
3567 */
3568 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3569 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3570 ICR_RXO | ICR_RXT0;
3571 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3572 sc->sc_icr |= ICR_RXCFG;
3573 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3574
3575 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3576 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
3577 reg = CSR_READ(sc, WMREG_KABGTXD);
3578 reg |= KABGTXD_BGSQLBIAS;
3579 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3580 }
3581
3582 /* Set up the inter-packet gap. */
3583 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3584
3585 if (sc->sc_type >= WM_T_82543) {
3586 /*
3587 * Set up the interrupt throttling register (units of 256ns)
3588 * Note that a footnote in Intel's documentation says this
3589 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3590 * or 10Mbit mode. Empirically, it appears to be the case
3591 * that that is also true for the 1024ns units of the other
3592 * interrupt-related timer registers -- so, really, we ought
3593 * to divide this value by 4 when the link speed is low.
3594 *
3595 * XXX implement this division at link speed change!
3596 */
3597
3598 /*
3599 * For N interrupts/sec, set this value to:
3600 * 1000000000 / (N * 256). Note that we set the
3601 * absolute and packet timer values to this value
3602 * divided by 4 to get "simple timer" behavior.
3603 */
3604
3605 sc->sc_itr = 1500; /* 2604 ints/sec */
3606 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3607 }
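	/*
	 * Worked arithmetic (illustrative): sc_itr = 1500 ticks of 256ns
	 * gives a minimum inter-interrupt interval of 384us, i.e.
	 * 1000000000 / (1500 * 256) = ~2604 interrupts/sec, and
	 * 1500 / 4 = 375 matches the TIDV/TADV/RDTR/RADV values
	 * programmed earlier for "simple timer" behavior.
	 */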
3608
3609 /* Set the VLAN ethernetype. */
3610 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3611
3612 /*
3613 * Set up the transmit control register; we start out with
3614 	 * a collision distance suitable for FDX, but update it when
3615 * we resolve the media type.
3616 */
3617 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3618 | TCTL_CT(TX_COLLISION_THRESHOLD)
3619 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3620 if (sc->sc_type >= WM_T_82571)
3621 sc->sc_tctl |= TCTL_MULR;
3622 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3623
3624 if (sc->sc_type == WM_T_80003) {
3625 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3626 reg &= ~TCTL_EXT_GCEX_MASK;
3627 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3628 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3629 }
3630
3631 /* Set the media. */
3632 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3633 goto out;
3634
3635 /*
3636 * Set up the receive control register; we actually program
3637 * the register when we set the receive filter. Use multicast
3638 * address offset type 0.
3639 *
3640 * Only the i82544 has the ability to strip the incoming
3641 * CRC, so we don't enable that feature.
3642 */
3643 sc->sc_mchash_type = 0;
3644 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3645 | RCTL_MO(sc->sc_mchash_type);
3646
3647 if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
3648 && (ifp->if_mtu > ETHERMTU))
3649 sc->sc_rctl |= RCTL_LPE;
3650
3651 if (MCLBYTES == 2048) {
3652 sc->sc_rctl |= RCTL_2k;
3653 } else {
3654 if (sc->sc_type >= WM_T_82543) {
3655 			switch (MCLBYTES) {
3656 case 4096:
3657 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3658 break;
3659 case 8192:
3660 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3661 break;
3662 case 16384:
3663 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3664 break;
3665 default:
3666 panic("wm_init: MCLBYTES %d unsupported",
3667 MCLBYTES);
3668 break;
3669 }
3670 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3671 }
3672
3673 /* Set the receive filter. */
3674 wm_set_filter(sc);
3675
3676 /* Start the one second link check clock. */
3677 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3678
3679 /* ...all done! */
3680 ifp->if_flags |= IFF_RUNNING;
3681 ifp->if_flags &= ~IFF_OACTIVE;
3682
3683 out:
3684 if (error)
3685 log(LOG_ERR, "%s: interface not running\n",
3686 device_xname(sc->sc_dev));
3687 return (error);
3688 }
3689
3690 /*
3691 * wm_rxdrain:
3692 *
3693 * Drain the receive queue.
3694 */
3695 static void
3696 wm_rxdrain(struct wm_softc *sc)
3697 {
3698 struct wm_rxsoft *rxs;
3699 int i;
3700
3701 for (i = 0; i < WM_NRXDESC; i++) {
3702 rxs = &sc->sc_rxsoft[i];
3703 if (rxs->rxs_mbuf != NULL) {
3704 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3705 m_freem(rxs->rxs_mbuf);
3706 rxs->rxs_mbuf = NULL;
3707 }
3708 }
3709 }
3710
3711 /*
3712 * wm_stop: [ifnet interface function]
3713 *
3714 * Stop transmission on the interface.
3715 */
3716 static void
3717 wm_stop(struct ifnet *ifp, int disable)
3718 {
3719 struct wm_softc *sc = ifp->if_softc;
3720 struct wm_txsoft *txs;
3721 int i;
3722
3723 /* Stop the one second clock. */
3724 callout_stop(&sc->sc_tick_ch);
3725
3726 /* Stop the 82547 Tx FIFO stall check timer. */
3727 if (sc->sc_type == WM_T_82547)
3728 callout_stop(&sc->sc_txfifo_ch);
3729
3730 if (sc->sc_flags & WM_F_HAS_MII) {
3731 /* Down the MII. */
3732 mii_down(&sc->sc_mii);
3733 } else {
3734 #if 0
3735 /* Should we clear PHY's status properly? */
3736 wm_reset(sc);
3737 #endif
3738 }
3739
3740 /* Stop the transmit and receive processes. */
3741 CSR_WRITE(sc, WMREG_TCTL, 0);
3742 CSR_WRITE(sc, WMREG_RCTL, 0);
3743
3744 /*
3745 * Clear the interrupt mask to ensure the device cannot assert its
3746 * interrupt line.
3747 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3748 * any currently pending or shared interrupt.
3749 */
3750 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3751 sc->sc_icr = 0;
3752
3753 /* Release any queued transmit buffers. */
3754 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3755 txs = &sc->sc_txsoft[i];
3756 if (txs->txs_mbuf != NULL) {
3757 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3758 m_freem(txs->txs_mbuf);
3759 txs->txs_mbuf = NULL;
3760 }
3761 }
3762
3763 /* Mark the interface as down and cancel the watchdog timer. */
3764 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3765 ifp->if_timer = 0;
3766
3767 if (disable)
3768 wm_rxdrain(sc);
3769 }
3770
3771 void
3772 wm_get_auto_rd_done(struct wm_softc *sc)
3773 {
3774 int i;
3775
3776 /* wait for eeprom to reload */
3777 switch (sc->sc_type) {
3778 case WM_T_82571:
3779 case WM_T_82572:
3780 case WM_T_82573:
3781 case WM_T_82574:
3782 case WM_T_82583:
3783 case WM_T_80003:
3784 case WM_T_ICH8:
3785 case WM_T_ICH9:
3786 for (i = 0; i < 10; i++) {
3787 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3788 break;
3789 delay(1000);
3790 }
3791 if (i == 10) {
3792 log(LOG_ERR, "%s: auto read from eeprom failed to "
3793 "complete\n", device_xname(sc->sc_dev));
3794 }
3795 break;
3796 default:
3797 break;
3798 }
3799 }
3800
3801 void
3802 wm_lan_init_done(struct wm_softc *sc)
3803 {
3804 uint32_t reg = 0;
3805 int i;
3806
3807 /* wait for eeprom to reload */
3808 switch (sc->sc_type) {
3809 case WM_T_ICH10:
3810 case WM_T_PCH:
3811 for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3812 reg = CSR_READ(sc, WMREG_STATUS);
3813 if ((reg & STATUS_LAN_INIT_DONE) != 0)
3814 break;
3815 delay(100);
3816 }
3817 if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3818 log(LOG_ERR, "%s: %s: lan_init_done failed to "
3819 "complete\n", device_xname(sc->sc_dev), __func__);
3820 }
3821 break;
3822 default:
3823 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3824 __func__);
3825 break;
3826 }
3827
3828 reg &= ~STATUS_LAN_INIT_DONE;
3829 CSR_WRITE(sc, WMREG_STATUS, reg);
3830 }
3831
3832 void
3833 wm_get_cfg_done(struct wm_softc *sc)
3834 {
3835 int func = 0;
3836 int mask;
3837 uint32_t reg;
3838 int i;
3839
3840 /* wait for eeprom to reload */
3841 switch (sc->sc_type) {
3842 case WM_T_82542_2_0:
3843 case WM_T_82542_2_1:
3844 /* null */
3845 break;
3846 case WM_T_82543:
3847 case WM_T_82544:
3848 case WM_T_82540:
3849 case WM_T_82545:
3850 case WM_T_82545_3:
3851 case WM_T_82546:
3852 case WM_T_82546_3:
3853 case WM_T_82541:
3854 case WM_T_82541_2:
3855 case WM_T_82547:
3856 case WM_T_82547_2:
3857 case WM_T_82573:
3858 case WM_T_82574:
3859 case WM_T_82583:
3860 /* generic */
3861 delay(10*1000);
3862 break;
3863 case WM_T_80003:
3864 case WM_T_82571:
3865 case WM_T_82572:
3866 if (sc->sc_type == WM_T_80003)
3867 func = (CSR_READ(sc, WMREG_STATUS)
3868 >> STATUS_FUNCID_SHIFT) & 1;
3869 else
3870 func = 0; /* XXX Is it true for 82571? */
3871 mask = (func == 1) ? EEMNGCTL_CFGDONE_1 : EEMNGCTL_CFGDONE_0;
3872 for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3873 if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3874 break;
3875 delay(1000);
3876 }
3877 if (i >= WM_PHY_CFG_TIMEOUT) {
3878 DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3879 device_xname(sc->sc_dev), __func__));
3880 }
3881 break;
3882 case WM_T_ICH8:
3883 case WM_T_ICH9:
3884 case WM_T_ICH10:
3885 case WM_T_PCH:
3886 if (sc->sc_type >= WM_T_PCH) {
3887 reg = CSR_READ(sc, WMREG_STATUS);
3888 if ((reg & STATUS_PHYRA) != 0)
3889 CSR_WRITE(sc, WMREG_STATUS,
3890 reg & ~STATUS_PHYRA);
3891 }
3892 delay(10*1000);
3893 break;
3894 default:
3895 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3896 __func__);
3897 break;
3898 }
3899 }
3900
3901 /*
3902 * wm_acquire_eeprom:
3903 *
3904 * Perform the EEPROM handshake required on some chips.
3905 */
3906 static int
3907 wm_acquire_eeprom(struct wm_softc *sc)
3908 {
3909 uint32_t reg;
3910 int x;
3911 int ret = 0;
3912
3913 	/* access always succeeds for flash-type EEPROMs */
3914 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3915 return 0;
3916
3917 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3918 ret = wm_get_swfwhw_semaphore(sc);
3919 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3920 /* this will also do wm_get_swsm_semaphore() if needed */
3921 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3922 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3923 ret = wm_get_swsm_semaphore(sc);
3924 }
3925
3926 if (ret) {
3927 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3928 __func__);
3929 return 1;
3930 }
3931
3932 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3933 reg = CSR_READ(sc, WMREG_EECD);
3934
3935 /* Request EEPROM access. */
3936 reg |= EECD_EE_REQ;
3937 CSR_WRITE(sc, WMREG_EECD, reg);
3938
3939 		/* ...and wait for it to be granted. */
3940 for (x = 0; x < 1000; x++) {
3941 reg = CSR_READ(sc, WMREG_EECD);
3942 if (reg & EECD_EE_GNT)
3943 break;
3944 delay(5);
3945 }
3946 if ((reg & EECD_EE_GNT) == 0) {
3947 aprint_error_dev(sc->sc_dev,
3948 "could not acquire EEPROM GNT\n");
3949 reg &= ~EECD_EE_REQ;
3950 CSR_WRITE(sc, WMREG_EECD, reg);
3951 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3952 wm_put_swfwhw_semaphore(sc);
3953 if (sc->sc_flags & WM_F_SWFW_SYNC)
3954 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3955 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3956 wm_put_swsm_semaphore(sc);
3957 return (1);
3958 }
3959 }
3960
3961 return (0);
3962 }
3963
3964 /*
3965 * wm_release_eeprom:
3966 *
3967 * Release the EEPROM mutex.
3968 */
3969 static void
3970 wm_release_eeprom(struct wm_softc *sc)
3971 {
3972 uint32_t reg;
3973
3974 	/* access always succeeds for flash-type EEPROMs */
3975 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3976 return;
3977
3978 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3979 reg = CSR_READ(sc, WMREG_EECD);
3980 reg &= ~EECD_EE_REQ;
3981 CSR_WRITE(sc, WMREG_EECD, reg);
3982 }
3983
3984 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3985 wm_put_swfwhw_semaphore(sc);
3986 if (sc->sc_flags & WM_F_SWFW_SYNC)
3987 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3988 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3989 wm_put_swsm_semaphore(sc);
3990 }
3991
3992 /*
3993 * wm_eeprom_sendbits:
3994 *
3995 * Send a series of bits to the EEPROM.
3996 */
3997 static void
3998 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3999 {
4000 uint32_t reg;
4001 int x;
4002
4003 reg = CSR_READ(sc, WMREG_EECD);
4004
4005 for (x = nbits; x > 0; x--) {
4006 if (bits & (1U << (x - 1)))
4007 reg |= EECD_DI;
4008 else
4009 reg &= ~EECD_DI;
4010 CSR_WRITE(sc, WMREG_EECD, reg);
4011 delay(2);
4012 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4013 delay(2);
4014 CSR_WRITE(sc, WMREG_EECD, reg);
4015 delay(2);
4016 }
4017 }
4018
4019 /*
4020 * wm_eeprom_recvbits:
4021 *
4022 * Receive a series of bits from the EEPROM.
4023 */
4024 static void
4025 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
4026 {
4027 uint32_t reg, val;
4028 int x;
4029
4030 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
4031
4032 val = 0;
4033 for (x = nbits; x > 0; x--) {
4034 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
4035 delay(2);
4036 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
4037 val |= (1U << (x - 1));
4038 CSR_WRITE(sc, WMREG_EECD, reg);
4039 delay(2);
4040 }
4041 *valp = val;
4042 }
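
/*
 * Note: a complete MicroWire READ transaction is assembled from the
 * two helpers above by wm_read_eeprom_uwire() below, roughly:
 *
 *	(raise CS)
 *	wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);	    start + opcode
 *	wm_eeprom_sendbits(sc, addr, sc->sc_ee_addrbits);   word address
 *	wm_eeprom_recvbits(sc, &val, 16);		    one data word
 *	(drop CS)
 *
 * Bits move MSB-first with one SK pulse per bit, at roughly 6us per
 * bit given the three delay(2) calls in the send path.
 */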
4043
4044 /*
4045 * wm_read_eeprom_uwire:
4046 *
4047 * Read a word from the EEPROM using the MicroWire protocol.
4048 */
4049 static int
4050 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4051 {
4052 uint32_t reg, val;
4053 int i;
4054
4055 for (i = 0; i < wordcnt; i++) {
4056 /* Clear SK and DI. */
4057 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
4058 CSR_WRITE(sc, WMREG_EECD, reg);
4059
4060 /* Set CHIP SELECT. */
4061 reg |= EECD_CS;
4062 CSR_WRITE(sc, WMREG_EECD, reg);
4063 delay(2);
4064
4065 /* Shift in the READ command. */
4066 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
4067
4068 /* Shift in address. */
4069 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
4070
4071 /* Shift out the data. */
4072 wm_eeprom_recvbits(sc, &val, 16);
4073 data[i] = val & 0xffff;
4074
4075 /* Clear CHIP SELECT. */
4076 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
4077 CSR_WRITE(sc, WMREG_EECD, reg);
4078 delay(2);
4079 }
4080
4081 return (0);
4082 }
4083
4084 /*
4085 * wm_spi_eeprom_ready:
4086 *
4087 * Wait for a SPI EEPROM to be ready for commands.
4088 */
4089 static int
4090 wm_spi_eeprom_ready(struct wm_softc *sc)
4091 {
4092 uint32_t val;
4093 int usec;
4094
4095 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
4096 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
4097 wm_eeprom_recvbits(sc, &val, 8);
4098 if ((val & SPI_SR_RDY) == 0)
4099 break;
4100 }
4101 if (usec >= SPI_MAX_RETRIES) {
4102 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
4103 return (1);
4104 }
4105 return (0);
4106 }
4107
4108 /*
4109 * wm_read_eeprom_spi:
4110 *
4111  *	Read a word from the EEPROM using the SPI protocol.
4112 */
4113 static int
4114 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4115 {
4116 uint32_t reg, val;
4117 int i;
4118 uint8_t opc;
4119
4120 /* Clear SK and CS. */
4121 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
4122 CSR_WRITE(sc, WMREG_EECD, reg);
4123 delay(2);
4124
4125 if (wm_spi_eeprom_ready(sc))
4126 return (1);
4127
4128 /* Toggle CS to flush commands. */
4129 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
4130 delay(2);
4131 CSR_WRITE(sc, WMREG_EECD, reg);
4132 delay(2);
4133
4134 opc = SPI_OPC_READ;
4135 if (sc->sc_ee_addrbits == 8 && word >= 128)
4136 opc |= SPI_OPC_A8;
4137
4138 wm_eeprom_sendbits(sc, opc, 8);
4139 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
4140
4141 for (i = 0; i < wordcnt; i++) {
4142 wm_eeprom_recvbits(sc, &val, 16);
4143 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
4144 }
4145
4146 /* Raise CS and clear SK. */
4147 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
4148 CSR_WRITE(sc, WMREG_EECD, reg);
4149 delay(2);
4150
4151 return (0);
4152 }
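
/*
 * Notes on the SPI path above: the opcode is followed by a byte
 * address, hence the "word << 1".  wm_eeprom_recvbits() makes the
 * first bit received the MSB of val, while the part emits the
 * low-order byte of each word first, so the byte swap restores host
 * order: a stored word 0x1234 arrives as val == 0x3412 and is
 * returned as data[i] == 0x1234 (assuming little-endian word storage,
 * which the swap implies).
 */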
4153
4154 #define EEPROM_CHECKSUM 0xBABA
4155 #define EEPROM_SIZE 0x0040
4156
4157 /*
4158 * wm_validate_eeprom_checksum
4159 *
4160 * The checksum is defined as the sum of the first 64 (16 bit) words.
4161 */
4162 static int
4163 wm_validate_eeprom_checksum(struct wm_softc *sc)
4164 {
4165 uint16_t checksum;
4166 uint16_t eeprom_data;
4167 int i;
4168
4169 checksum = 0;
4170
4171 for (i = 0; i < EEPROM_SIZE; i++) {
4172 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
4173 return 1;
4174 checksum += eeprom_data;
4175 }
4176
4177 if (checksum != (uint16_t) EEPROM_CHECKSUM)
4178 return 1;
4179
4180 return 0;
4181 }
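
/*
 * Sketch: an EEPROM image is made to validate by choosing one word
 * (conventionally the last of the EEPROM_SIZE words) as
 *
 *	cksum_word = (uint16_t)(EEPROM_CHECKSUM - sum_of_other_words);
 *
 * so that the 16-bit sum of all 64 words wraps to exactly 0xBABA.
 */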
4182
4183 /*
4184 * wm_read_eeprom:
4185 *
4186 * Read data from the serial EEPROM.
4187 */
4188 static int
4189 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
4190 {
4191 int rv;
4192
4193 if (sc->sc_flags & WM_F_EEPROM_INVALID)
4194 return 1;
4195
4196 if (wm_acquire_eeprom(sc))
4197 return 1;
4198
4199 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4200 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4201 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
4202 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
4203 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
4204 else if (sc->sc_flags & WM_F_EEPROM_SPI)
4205 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
4206 else
4207 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
4208
4209 wm_release_eeprom(sc);
4210 return rv;
4211 }
4212
4213 static int
4214 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
4215 uint16_t *data)
4216 {
4217 int i, eerd = 0;
4218 int error = 0;
4219
4220 for (i = 0; i < wordcnt; i++) {
4221 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
4222
4223 CSR_WRITE(sc, WMREG_EERD, eerd);
4224 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
4225 if (error != 0)
4226 break;
4227
4228 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4229 }
4230
4231 return error;
4232 }
4233
4234 static int
4235 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4236 {
4237 uint32_t attempts = 100000;
4238 uint32_t i, reg = 0;
4239 int32_t done = -1;
4240
4241 for (i = 0; i < attempts; i++) {
4242 reg = CSR_READ(sc, rw);
4243
4244 if (reg & EERD_DONE) {
4245 done = 0;
4246 break;
4247 }
4248 delay(5);
4249 }
4250
4251 return done;
4252 }
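
/*
 * Note: the poll above allows up to 100000 * 5us = 500ms for the
 * hardware to set the DONE bit before giving up and returning -1.
 * EERD and EEWR share the same DONE semantics, which is why the
 * register offset is passed in as "rw".
 */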
4253
4254 /*
4255 * wm_add_rxbuf:
4256 *
4257  *	Add a receive buffer to the indicated descriptor.
4258 */
4259 static int
4260 wm_add_rxbuf(struct wm_softc *sc, int idx)
4261 {
4262 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4263 struct mbuf *m;
4264 int error;
4265
4266 MGETHDR(m, M_DONTWAIT, MT_DATA);
4267 if (m == NULL)
4268 return (ENOBUFS);
4269
4270 MCLGET(m, M_DONTWAIT);
4271 if ((m->m_flags & M_EXT) == 0) {
4272 m_freem(m);
4273 return (ENOBUFS);
4274 }
4275
4276 if (rxs->rxs_mbuf != NULL)
4277 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4278
4279 rxs->rxs_mbuf = m;
4280
4281 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4282 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4283 BUS_DMA_READ|BUS_DMA_NOWAIT);
4284 if (error) {
4285 /* XXX XXX XXX */
4286 aprint_error_dev(sc->sc_dev,
4287 "unable to load rx DMA map %d, error = %d\n",
4288 idx, error);
4289 panic("wm_add_rxbuf");
4290 }
4291
4292 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4293 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4294
4295 WM_INIT_RXDESC(sc, idx);
4296
4297 return (0);
4298 }
4299
4300 /*
4301 * wm_set_ral:
4302 *
4303  *	Set an entry in the receive address list.
4304 */
4305 static void
4306 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4307 {
4308 uint32_t ral_lo, ral_hi;
4309
4310 if (enaddr != NULL) {
4311 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4312 (enaddr[3] << 24);
4313 ral_hi = enaddr[4] | (enaddr[5] << 8);
4314 ral_hi |= RAL_AV;
4315 } else {
4316 ral_lo = 0;
4317 ral_hi = 0;
4318 }
4319
4320 if (sc->sc_type >= WM_T_82544) {
4321 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4322 ral_lo);
4323 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4324 ral_hi);
4325 } else {
4326 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4327 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4328 }
4329 }
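
/*
 * Worked example of the packing above: for enaddr 00:11:22:33:44:55,
 * ral_lo == 0x33221100 and ral_hi == 0x5544 | RAL_AV, i.e. the six
 * bytes occupy RAL_LO byte 0 through RAL_HI byte 1 in ascending
 * order, with the address-valid bit set in RAL_HI.
 */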
4330
4331 /*
4332 * wm_mchash:
4333 *
4334 * Compute the hash of the multicast address for the 4096-bit
4335 * multicast filter.
4336 */
4337 static uint32_t
4338 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4339 {
4340 static const int lo_shift[4] = { 4, 3, 2, 0 };
4341 static const int hi_shift[4] = { 4, 5, 6, 8 };
4342 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4343 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4344 uint32_t hash;
4345
4346 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4347 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)) {
4348 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4349 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4350 return (hash & 0x3ff);
4351 }
4352 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4353 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4354
4355 return (hash & 0xfff);
4356 }
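
/*
 * Worked example: with the default sc_mchash_type of 0 on a non-ICH
 * chip, the all-hosts group 01:00:5e:00:00:01 hashes to
 * (0x00 >> 4) | (0x01 << 4) == 0x010; wm_set_filter() below then sets
 * bit 16 (hash & 0x1f) of multicast-table word 0 (hash >> 5).
 */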
4357
4358 /*
4359 * wm_set_filter:
4360 *
4361 * Set up the receive filter.
4362 */
4363 static void
4364 wm_set_filter(struct wm_softc *sc)
4365 {
4366 struct ethercom *ec = &sc->sc_ethercom;
4367 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4368 struct ether_multi *enm;
4369 struct ether_multistep step;
4370 bus_addr_t mta_reg;
4371 uint32_t hash, reg, bit;
4372 int i, size;
4373
4374 if (sc->sc_type >= WM_T_82544)
4375 mta_reg = WMREG_CORDOVA_MTA;
4376 else
4377 mta_reg = WMREG_MTA;
4378
4379 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4380
4381 if (ifp->if_flags & IFF_BROADCAST)
4382 sc->sc_rctl |= RCTL_BAM;
4383 if (ifp->if_flags & IFF_PROMISC) {
4384 sc->sc_rctl |= RCTL_UPE;
4385 goto allmulti;
4386 }
4387
4388 /*
4389 * Set the station address in the first RAL slot, and
4390 * clear the remaining slots.
4391 */
4392 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4393 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4394 size = WM_ICH8_RAL_TABSIZE;
4395 else
4396 size = WM_RAL_TABSIZE;
4397 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4398 for (i = 1; i < size; i++)
4399 wm_set_ral(sc, NULL, i);
4400
4401 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4402 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4403 size = WM_ICH8_MC_TABSIZE;
4404 else
4405 size = WM_MC_TABSIZE;
4406 /* Clear out the multicast table. */
4407 for (i = 0; i < size; i++)
4408 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4409
4410 ETHER_FIRST_MULTI(step, ec, enm);
4411 while (enm != NULL) {
4412 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4413 /*
4414 * We must listen to a range of multicast addresses.
4415 * For now, just accept all multicasts, rather than
4416 * trying to set only those filter bits needed to match
4417 * the range. (At this time, the only use of address
4418 * ranges is for IP multicast routing, for which the
4419 * range is big enough to require all bits set.)
4420 */
4421 goto allmulti;
4422 }
4423
4424 hash = wm_mchash(sc, enm->enm_addrlo);
4425
4426 reg = (hash >> 5);
4427 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4428 || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH))
4429 reg &= 0x1f;
4430 else
4431 reg &= 0x7f;
4432 bit = hash & 0x1f;
4433
4434 hash = CSR_READ(sc, mta_reg + (reg << 2));
4435 hash |= 1U << bit;
4436
4437 /* XXX Hardware bug?? */
4438 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4439 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4440 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4441 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4442 } else
4443 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4444
4445 ETHER_NEXT_MULTI(step, enm);
4446 }
4447
4448 ifp->if_flags &= ~IFF_ALLMULTI;
4449 goto setit;
4450
4451 allmulti:
4452 ifp->if_flags |= IFF_ALLMULTI;
4453 sc->sc_rctl |= RCTL_MPE;
4454
4455 setit:
4456 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4457 }
4458
4459 /*
4460 * wm_tbi_mediainit:
4461 *
4462 * Initialize media for use on 1000BASE-X devices.
4463 */
4464 static void
4465 wm_tbi_mediainit(struct wm_softc *sc)
4466 {
4467 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4468 const char *sep = "";
4469
4470 if (sc->sc_type < WM_T_82543)
4471 sc->sc_tipg = TIPG_WM_DFLT;
4472 else
4473 sc->sc_tipg = TIPG_LG_DFLT;
4474
4475 sc->sc_tbi_anegticks = 5;
4476
4477 /* Initialize our media structures */
4478 sc->sc_mii.mii_ifp = ifp;
4479
4480 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4481 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4482 wm_tbi_mediastatus);
4483
4484 /*
4485 * SWD Pins:
4486 *
4487 * 0 = Link LED (output)
4488 * 1 = Loss Of Signal (input)
4489 */
4490 sc->sc_ctrl |= CTRL_SWDPIO(0);
4491 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4492
4493 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4494
4495 #define ADD(ss, mm, dd) \
4496 do { \
4497 aprint_normal("%s%s", sep, ss); \
4498 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4499 sep = ", "; \
4500 } while (/*CONSTCOND*/0)
4501
4502 aprint_normal_dev(sc->sc_dev, "");
4503 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4504 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4505 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4506 aprint_normal("\n");
4507
4508 #undef ADD
4509
4510 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4511 }
4512
4513 /*
4514 * wm_tbi_mediastatus: [ifmedia interface function]
4515 *
4516 * Get the current interface media status on a 1000BASE-X device.
4517 */
4518 static void
4519 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4520 {
4521 struct wm_softc *sc = ifp->if_softc;
4522 uint32_t ctrl, status;
4523
4524 ifmr->ifm_status = IFM_AVALID;
4525 ifmr->ifm_active = IFM_ETHER;
4526
4527 status = CSR_READ(sc, WMREG_STATUS);
4528 if ((status & STATUS_LU) == 0) {
4529 ifmr->ifm_active |= IFM_NONE;
4530 return;
4531 }
4532
4533 ifmr->ifm_status |= IFM_ACTIVE;
4534 ifmr->ifm_active |= IFM_1000_SX;
4535 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4536 ifmr->ifm_active |= IFM_FDX;
4537 ctrl = CSR_READ(sc, WMREG_CTRL);
4538 if (ctrl & CTRL_RFCE)
4539 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4540 if (ctrl & CTRL_TFCE)
4541 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4542 }
4543
4544 /*
4545 * wm_tbi_mediachange: [ifmedia interface function]
4546 *
4547 * Set hardware to newly-selected media on a 1000BASE-X device.
4548 */
4549 static int
4550 wm_tbi_mediachange(struct ifnet *ifp)
4551 {
4552 struct wm_softc *sc = ifp->if_softc;
4553 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4554 uint32_t status;
4555 int i;
4556
4557 sc->sc_txcw = 0;
4558 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4559 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4560 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4561 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4562 sc->sc_txcw |= TXCW_ANE;
4563 } else {
4564 /*
4565 * If autonegotiation is turned off, force link up and turn on
4566 * full duplex
4567 */
4568 sc->sc_txcw &= ~TXCW_ANE;
4569 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4570 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4571 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4572 delay(1000);
4573 }
4574
4575 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4576 device_xname(sc->sc_dev),sc->sc_txcw));
4577 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4578 delay(10000);
4579
4580 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4581 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4582
4583 /*
4584 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4585 * optics detect a signal, 0 if they don't.
4586 */
4587 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4588 /* Have signal; wait for the link to come up. */
4589
4590 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4591 /*
4592 * Reset the link, and let autonegotiation do its thing
4593 */
4594 sc->sc_ctrl |= CTRL_LRST;
4595 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4596 delay(1000);
4597 sc->sc_ctrl &= ~CTRL_LRST;
4598 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4599 delay(1000);
4600 }
4601
4602 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4603 delay(10000);
4604 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4605 break;
4606 }
4607
4608 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4609 device_xname(sc->sc_dev),i));
4610
4611 status = CSR_READ(sc, WMREG_STATUS);
4612 DPRINTF(WM_DEBUG_LINK,
4613 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4614 device_xname(sc->sc_dev),status, STATUS_LU));
4615 if (status & STATUS_LU) {
4616 /* Link is up. */
4617 DPRINTF(WM_DEBUG_LINK,
4618 ("%s: LINK: set media -> link up %s\n",
4619 device_xname(sc->sc_dev),
4620 (status & STATUS_FD) ? "FDX" : "HDX"));
4621
4622 /*
4623 * NOTE: CTRL will update TFCE and RFCE automatically,
4624 * so we should update sc->sc_ctrl
4625 */
4626 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4627 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4628 sc->sc_fcrtl &= ~FCRTL_XONE;
4629 if (status & STATUS_FD)
4630 sc->sc_tctl |=
4631 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4632 else
4633 sc->sc_tctl |=
4634 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4635 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4636 sc->sc_fcrtl |= FCRTL_XONE;
4637 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4638 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4639 WMREG_OLD_FCRTL : WMREG_FCRTL,
4640 sc->sc_fcrtl);
4641 sc->sc_tbi_linkup = 1;
4642 } else {
4643 if (i == WM_LINKUP_TIMEOUT)
4644 wm_check_for_link(sc);
4645 /* Link is down. */
4646 DPRINTF(WM_DEBUG_LINK,
4647 ("%s: LINK: set media -> link down\n",
4648 device_xname(sc->sc_dev)));
4649 sc->sc_tbi_linkup = 0;
4650 }
4651 } else {
4652 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4653 device_xname(sc->sc_dev)));
4654 sc->sc_tbi_linkup = 0;
4655 }
4656
4657 wm_tbi_set_linkled(sc);
4658
4659 return (0);
4660 }
4661
4662 /*
4663 * wm_tbi_set_linkled:
4664 *
4665 * Update the link LED on 1000BASE-X devices.
4666 */
4667 static void
4668 wm_tbi_set_linkled(struct wm_softc *sc)
4669 {
4670
4671 if (sc->sc_tbi_linkup)
4672 sc->sc_ctrl |= CTRL_SWDPIN(0);
4673 else
4674 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4675
4676 /* 82540 or newer devices are active low */
4677 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4678
4679 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4680 }
4681
4682 /*
4683 * wm_tbi_check_link:
4684 *
4685 * Check the link on 1000BASE-X devices.
4686 */
4687 static void
4688 wm_tbi_check_link(struct wm_softc *sc)
4689 {
4690 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4691 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4692 uint32_t rxcw, ctrl, status;
4693
4694 status = CSR_READ(sc, WMREG_STATUS);
4695
4696 rxcw = CSR_READ(sc, WMREG_RXCW);
4697 ctrl = CSR_READ(sc, WMREG_CTRL);
4698
4699 /* set link status */
4700 if ((status & STATUS_LU) == 0) {
4701 DPRINTF(WM_DEBUG_LINK,
4702 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4703 sc->sc_tbi_linkup = 0;
4704 } else if (sc->sc_tbi_linkup == 0) {
4705 DPRINTF(WM_DEBUG_LINK,
4706 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4707 (status & STATUS_FD) ? "FDX" : "HDX"));
4708 sc->sc_tbi_linkup = 1;
4709 }
4710
4711 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4712 && ((status & STATUS_LU) == 0)) {
4713 sc->sc_tbi_linkup = 0;
4714 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4715 /* RXCFG storm! */
4716 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4717 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4718 wm_init(ifp);
4719 wm_start(ifp);
4720 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4721 /* If the timer expired, retry autonegotiation */
4722 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4723 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4724 sc->sc_tbi_ticks = 0;
4725 /*
4726 * Reset the link, and let autonegotiation do
4727 * its thing
4728 */
4729 sc->sc_ctrl |= CTRL_LRST;
4730 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4731 delay(1000);
4732 sc->sc_ctrl &= ~CTRL_LRST;
4733 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4734 delay(1000);
4735 CSR_WRITE(sc, WMREG_TXCW,
4736 sc->sc_txcw & ~TXCW_ANE);
4737 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4738 }
4739 }
4740 }
4741
4742 wm_tbi_set_linkled(sc);
4743 }
4744
4745 /*
4746 * wm_gmii_reset:
4747 *
4748 * Reset the PHY.
4749 */
4750 static void
4751 wm_gmii_reset(struct wm_softc *sc)
4752 {
4753 uint32_t reg;
4754 int func = 0; /* XXX gcc */
4755 int rv;
4756
4757 /* get phy semaphore */
4758 switch (sc->sc_type) {
4759 case WM_T_82571:
4760 case WM_T_82572:
4761 case WM_T_82573:
4762 case WM_T_82574:
4763 case WM_T_82583:
4764 		/* XXX should get sw semaphore, too */
4765 rv = wm_get_swsm_semaphore(sc);
4766 break;
4767 case WM_T_80003:
4768 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4769 rv = wm_get_swfw_semaphore(sc,
4770 func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4771 break;
4772 case WM_T_ICH8:
4773 case WM_T_ICH9:
4774 case WM_T_ICH10:
4775 case WM_T_PCH:
4776 rv = wm_get_swfwhw_semaphore(sc);
4777 break;
4778 default:
4779 /* nothing to do*/
4780 rv = 0;
4781 break;
4782 }
4783 if (rv != 0) {
4784 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4785 __func__);
4786 return;
4787 }
4788
4789 switch (sc->sc_type) {
4790 case WM_T_82542_2_0:
4791 case WM_T_82542_2_1:
4792 /* null */
4793 break;
4794 case WM_T_82543:
4795 /*
4796 * With 82543, we need to force speed and duplex on the MAC
4797 * equal to what the PHY speed and duplex configuration is.
4798 * In addition, we need to perform a hardware reset on the PHY
4799 * to take it out of reset.
4800 */
4801 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4802 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4803
4804 /* The PHY reset pin is active-low. */
4805 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4806 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4807 CTRL_EXT_SWDPIN(4));
4808 reg |= CTRL_EXT_SWDPIO(4);
4809
4810 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4811 delay(10*1000);
4812
4813 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4814 delay(150);
4815 #if 0
4816 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4817 #endif
4818 delay(20*1000); /* XXX extra delay to get PHY ID? */
4819 break;
4820 case WM_T_82544: /* reset 10000us */
4821 case WM_T_82540:
4822 case WM_T_82545:
4823 case WM_T_82545_3:
4824 case WM_T_82546:
4825 case WM_T_82546_3:
4826 case WM_T_82541:
4827 case WM_T_82541_2:
4828 case WM_T_82547:
4829 case WM_T_82547_2:
4830 case WM_T_82571: /* reset 100us */
4831 case WM_T_82572:
4832 case WM_T_82573:
4833 case WM_T_82574:
4834 case WM_T_82583:
4835 case WM_T_80003:
4836 /* generic reset */
4837 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4838 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4839 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4840 delay(150);
4841
4842 if ((sc->sc_type == WM_T_82541)
4843 || (sc->sc_type == WM_T_82541_2)
4844 || (sc->sc_type == WM_T_82547)
4845 || (sc->sc_type == WM_T_82547_2)) {
4846 			/* workarounds for igp are done in igp_reset() */
4847 /* XXX add code to set LED after phy reset */
4848 }
4849 break;
4850 case WM_T_ICH8:
4851 case WM_T_ICH9:
4852 case WM_T_ICH10:
4853 case WM_T_PCH:
4854 /* generic reset */
4855 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4856 delay(100);
4857 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4858 delay(150);
4859
4860 		/* Allow time for h/w to get to a quiescent state after reset */
4861 delay(10*1000);
4862
4863 /* XXX add code to set LED after phy reset */
4864 break;
4865 default:
4866 panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4867 __func__);
4868 break;
4869 }
4870
4871 /* release PHY semaphore */
4872 switch (sc->sc_type) {
4873 case WM_T_82571:
4874 case WM_T_82572:
4875 case WM_T_82573:
4876 case WM_T_82574:
4877 case WM_T_82583:
4878 		/* XXX should put sw semaphore, too */
4879 wm_put_swsm_semaphore(sc);
4880 break;
4881 case WM_T_80003:
4882 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4883 break;
4884 case WM_T_ICH8:
4885 case WM_T_ICH9:
4886 case WM_T_ICH10:
4887 case WM_T_PCH:
4888 wm_put_swfwhw_semaphore(sc);
4889 break;
4890 default:
4891 /* nothing to do*/
4892 rv = 0;
4893 break;
4894 }
4895
4896 /* get_cfg_done */
4897 wm_get_cfg_done(sc);
4898
4899 /* extra setup */
4900 switch (sc->sc_type) {
4901 case WM_T_82542_2_0:
4902 case WM_T_82542_2_1:
4903 case WM_T_82543:
4904 case WM_T_82544:
4905 case WM_T_82540:
4906 case WM_T_82545:
4907 case WM_T_82545_3:
4908 case WM_T_82546:
4909 case WM_T_82546_3:
4910 case WM_T_82541_2:
4911 case WM_T_82547_2:
4912 case WM_T_82571:
4913 case WM_T_82572:
4914 case WM_T_82573:
4915 case WM_T_82574:
4916 case WM_T_82583:
4917 case WM_T_80003:
4918 /* null */
4919 break;
4920 case WM_T_82541:
4921 case WM_T_82547:
4922 		/* XXX Actively configure the LED after PHY reset */
4923 break;
4924 case WM_T_ICH8:
4925 case WM_T_ICH9:
4926 case WM_T_ICH10:
4927 case WM_T_PCH:
4928 delay(10*1000);
4929
4930 if (sc->sc_type == WM_T_PCH) {
4931 /* XXX hv_phy_workaround */
4932
4933 /* dummy read from WUC */
4934 }
4935 /* XXX SW LCD configuration from NVM */
4936
4937 if (sc->sc_type == WM_T_PCH) {
4938 /* XXX Configure the LCD with the OEM bits in NVM */
4939 }
4940 break;
4941 default:
4942 panic("%s: unknown type\n", __func__);
4943 break;
4944 }
4945 }
4946
4947 /*
4948 * wm_gmii_mediainit:
4949 *
4950 * Initialize media for use on 1000BASE-T devices.
4951 */
4952 static void
4953 wm_gmii_mediainit(struct wm_softc *sc)
4954 {
4955 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4956
4957 /* We have MII. */
4958 sc->sc_flags |= WM_F_HAS_MII;
4959
4960 if (sc->sc_type == WM_T_80003)
4961 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4962 else
4963 sc->sc_tipg = TIPG_1000T_DFLT;
4964
4965 /*
4966 * Let the chip set speed/duplex on its own based on
4967 * signals from the PHY.
4968 * XXXbouyer - I'm not sure this is right for the 80003,
4969 * the em driver only sets CTRL_SLU here - but it seems to work.
4970 */
4971 sc->sc_ctrl |= CTRL_SLU;
4972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4973
4974 /* Initialize our media structures and probe the GMII. */
4975 sc->sc_mii.mii_ifp = ifp;
4976
4977 if (sc->sc_type >= WM_T_80003) {
4978 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4979 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4980 } else if (sc->sc_type >= WM_T_82544) {
4981 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4982 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4983 } else {
4984 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4985 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4986 }
4987 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4988
4989 wm_gmii_reset(sc);
4990
4991 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4992 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4993 wm_gmii_mediastatus);
4994
4995 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4996 MII_OFFSET_ANY, MIIF_DOPAUSE);
4997
4998 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4999 /* if failed, retry with *_bm_* */
5000 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5001 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5002
5003 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
5004 MII_OFFSET_ANY, MIIF_DOPAUSE);
5005 }
5006 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
5007 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
5008 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
5009 } else {
5010 if (sc->sc_type >= WM_T_82574) {
5011 struct mii_softc *child;
5012
5013 child = LIST_FIRST(&sc->sc_mii.mii_phys);
5014 			/* fix read/write functions to match the e1000 driver */
5015 if (device_is_a(child->mii_dev, "igphy")) {
5016 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
5017 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
5018 } else {
5019 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
5020 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
5021 }
5022 }
5023
5024 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
5025 }
5026 }
5027
5028 /*
5029 * wm_gmii_mediastatus: [ifmedia interface function]
5030 *
5031 * Get the current interface media status on a 1000BASE-T device.
5032 */
5033 static void
5034 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5035 {
5036 struct wm_softc *sc = ifp->if_softc;
5037
5038 ether_mediastatus(ifp, ifmr);
5039 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
5040 sc->sc_flowflags;
5041 }
5042
5043 /*
5044 * wm_gmii_mediachange: [ifmedia interface function]
5045 *
5046 * Set hardware to newly-selected media on a 1000BASE-T device.
5047 */
5048 static int
5049 wm_gmii_mediachange(struct ifnet *ifp)
5050 {
5051 struct wm_softc *sc = ifp->if_softc;
5052 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5053 int rc;
5054
5055 if ((ifp->if_flags & IFF_UP) == 0)
5056 return 0;
5057
5058 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
5059 sc->sc_ctrl |= CTRL_SLU;
5060 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
5061 || (sc->sc_type > WM_T_82543)) {
5062 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
5063 } else {
5064 sc->sc_ctrl &= ~CTRL_ASDE;
5065 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
5066 if (ife->ifm_media & IFM_FDX)
5067 sc->sc_ctrl |= CTRL_FD;
5068 		switch (IFM_SUBTYPE(ife->ifm_media)) {
5069 case IFM_10_T:
5070 sc->sc_ctrl |= CTRL_SPEED_10;
5071 break;
5072 case IFM_100_TX:
5073 sc->sc_ctrl |= CTRL_SPEED_100;
5074 break;
5075 case IFM_1000_T:
5076 sc->sc_ctrl |= CTRL_SPEED_1000;
5077 break;
5078 default:
5079 panic("wm_gmii_mediachange: bad media 0x%x",
5080 ife->ifm_media);
5081 }
5082 }
5083 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5084 if (sc->sc_type <= WM_T_82543)
5085 wm_gmii_reset(sc);
5086
5087 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
5088 return 0;
5089 return rc;
5090 }
5091
5092 #define MDI_IO CTRL_SWDPIN(2)
5093 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
5094 #define MDI_CLK CTRL_SWDPIN(3)
5095
5096 static void
5097 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
5098 {
5099 uint32_t i, v;
5100
5101 v = CSR_READ(sc, WMREG_CTRL);
5102 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5103 v |= MDI_DIR | CTRL_SWDPIO(3);
5104
5105 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
5106 if (data & i)
5107 v |= MDI_IO;
5108 else
5109 v &= ~MDI_IO;
5110 CSR_WRITE(sc, WMREG_CTRL, v);
5111 delay(10);
5112 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5113 delay(10);
5114 CSR_WRITE(sc, WMREG_CTRL, v);
5115 delay(10);
5116 }
5117 }
5118
5119 static uint32_t
5120 i82543_mii_recvbits(struct wm_softc *sc)
5121 {
5122 uint32_t v, i, data = 0;
5123
5124 v = CSR_READ(sc, WMREG_CTRL);
5125 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
5126 v |= CTRL_SWDPIO(3);
5127
5128 CSR_WRITE(sc, WMREG_CTRL, v);
5129 delay(10);
5130 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5131 delay(10);
5132 CSR_WRITE(sc, WMREG_CTRL, v);
5133 delay(10);
5134
5135 for (i = 0; i < 16; i++) {
5136 data <<= 1;
5137 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5138 delay(10);
5139 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
5140 data |= 1;
5141 CSR_WRITE(sc, WMREG_CTRL, v);
5142 delay(10);
5143 }
5144
5145 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
5146 delay(10);
5147 CSR_WRITE(sc, WMREG_CTRL, v);
5148 delay(10);
5149
5150 return (data);
5151 }
5152
5153 #undef MDI_IO
5154 #undef MDI_DIR
5155 #undef MDI_CLK
5156
5157 /*
5158 * wm_gmii_i82543_readreg: [mii interface function]
5159 *
5160 * Read a PHY register on the GMII (i82543 version).
5161 */
5162 static int
5163 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
5164 {
5165 struct wm_softc *sc = device_private(self);
5166 int rv;
5167
5168 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5169 i82543_mii_sendbits(sc, reg | (phy << 5) |
5170 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
5171 rv = i82543_mii_recvbits(sc) & 0xffff;
5172
5173 DPRINTF(WM_DEBUG_GMII,
5174 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
5175 device_xname(sc->sc_dev), phy, reg, rv));
5176
5177 return (rv);
5178 }
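
/*
 * Note on the frame built above: the 32 one-bits are the IEEE 802.3
 * MII management preamble; the 14 bits that follow are, MSB first,
 * start (2 bits), read opcode (2), PHY address (5) and register
 * address (5).  The turnaround and the 16 data bits are then clocked
 * in by i82543_mii_recvbits().
 */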
5179
5180 /*
5181 * wm_gmii_i82543_writereg: [mii interface function]
5182 *
5183 * Write a PHY register on the GMII (i82543 version).
5184 */
5185 static void
5186 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
5187 {
5188 struct wm_softc *sc = device_private(self);
5189
5190 i82543_mii_sendbits(sc, 0xffffffffU, 32);
5191 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
5192 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
5193 (MII_COMMAND_START << 30), 32);
5194 }
5195
5196 /*
5197 * wm_gmii_i82544_readreg: [mii interface function]
5198 *
5199 * Read a PHY register on the GMII.
5200 */
5201 static int
5202 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
5203 {
5204 struct wm_softc *sc = device_private(self);
5205 uint32_t mdic = 0;
5206 int i, rv;
5207
5208 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
5209 MDIC_REGADD(reg));
5210
5211 for (i = 0; i < 320; i++) {
5212 mdic = CSR_READ(sc, WMREG_MDIC);
5213 if (mdic & MDIC_READY)
5214 break;
5215 delay(10);
5216 }
5217
5218 if ((mdic & MDIC_READY) == 0) {
5219 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
5220 device_xname(sc->sc_dev), phy, reg);
5221 rv = 0;
5222 } else if (mdic & MDIC_E) {
5223 #if 0 /* This is normal if no PHY is present. */
5224 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
5225 device_xname(sc->sc_dev), phy, reg);
5226 #endif
5227 rv = 0;
5228 } else {
5229 rv = MDIC_DATA(mdic);
5230 if (rv == 0xffff)
5231 rv = 0;
5232 }
5233
5234 return (rv);
5235 }
5236
5237 /*
5238 * wm_gmii_i82544_writereg: [mii interface function]
5239 *
5240 * Write a PHY register on the GMII.
5241 */
5242 static void
5243 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
5244 {
5245 struct wm_softc *sc = device_private(self);
5246 uint32_t mdic = 0;
5247 int i;
5248
5249 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
5250 MDIC_REGADD(reg) | MDIC_DATA(val));
5251
5252 for (i = 0; i < 320; i++) {
5253 mdic = CSR_READ(sc, WMREG_MDIC);
5254 if (mdic & MDIC_READY)
5255 break;
5256 delay(10);
5257 }
5258
5259 if ((mdic & MDIC_READY) == 0)
5260 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
5261 device_xname(sc->sc_dev), phy, reg);
5262 else if (mdic & MDIC_E)
5263 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
5264 device_xname(sc->sc_dev), phy, reg);
5265 }
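
/*
 * Note: both MDIC paths above wait at most 320 * 10us = 3.2ms for
 * MDIC_READY.  On the read side, a result of 0xffff (typically what
 * an undriven MDIO line reads back as) is deliberately reported as 0,
 * matching the timeout and error cases.
 */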
5266
5267 /*
5268 * wm_gmii_i80003_readreg: [mii interface function]
5269 *
5270  *	Read a PHY register on the kumeran bus.
5271  *	This could be handled by the PHY layer if we didn't have to lock the
5272  *	resource ...
5273 */
5274 static int
5275 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
5276 {
5277 struct wm_softc *sc = device_private(self);
5278 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5279 int rv;
5280
5281 if (phy != 1) /* only one PHY on kumeran bus */
5282 return 0;
5283
5284 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5285 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5286 __func__);
5287 return 0;
5288 }
5289
5290 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5291 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5292 reg >> GG82563_PAGE_SHIFT);
5293 } else {
5294 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5295 reg >> GG82563_PAGE_SHIFT);
5296 }
5297 	/* Wait an extra 200us to work around a bug in the MDIC register's ready bit */
5298 delay(200);
5299 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5300 delay(200);
5301
5302 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5303 return (rv);
5304 }
5305
5306 /*
5307 * wm_gmii_i80003_writereg: [mii interface function]
5308 *
5309  *	Write a PHY register on the kumeran bus.
5310  *	This could be handled by the PHY layer if we didn't have to lock the
5311  *	resource ...
5312 */
5313 static void
5314 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
5315 {
5316 struct wm_softc *sc = device_private(self);
5317 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5318
5319 if (phy != 1) /* only one PHY on kumeran bus */
5320 return;
5321
5322 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5323 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5324 __func__);
5325 return;
5326 }
5327
5328 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
5329 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5330 reg >> GG82563_PAGE_SHIFT);
5331 } else {
5332 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
5333 reg >> GG82563_PAGE_SHIFT);
5334 }
5335 	/* Wait an extra 200us to work around a bug in the MDIC register's ready bit */
5336 delay(200);
5337 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5338 delay(200);
5339
5340 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5341 }
5342
5343 /*
5344 * wm_gmii_bm_readreg: [mii interface function]
5345 *
5346  *	Read a PHY register on the BM PHY.
5347  *	This could be handled by the PHY layer if we didn't have to lock the
5348  *	resource ...
5349 */
5350 static int
5351 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5352 {
5353 struct wm_softc *sc = device_private(self);
5354 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5355 int rv;
5356
5357 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5358 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5359 __func__);
5360 return 0;
5361 }
5362
5363 if (reg > GG82563_MAX_REG_ADDRESS) {
5364 if (phy == 1)
5365 wm_gmii_i82544_writereg(self, phy, 0x1f,
5366 reg);
5367 else
5368 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5369 reg >> GG82563_PAGE_SHIFT);
5370
5371 }
5372
5373 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5374 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5375 return (rv);
5376 }
5377
5378 /*
5379 * wm_gmii_bm_writereg: [mii interface function]
5380 *
5381  *	Write a PHY register on the BM PHY.
5382  *	This could be handled by the PHY layer if we didn't have to lock the
5383  *	resource ...
5384 */
5385 static void
5386 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5387 {
5388 struct wm_softc *sc = device_private(self);
5389 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5390
5391 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5392 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5393 __func__);
5394 return;
5395 }
5396
5397 if (reg > GG82563_MAX_REG_ADDRESS) {
5398 if (phy == 1)
5399 wm_gmii_i82544_writereg(self, phy, 0x1f,
5400 reg);
5401 else
5402 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
5403 reg >> GG82563_PAGE_SHIFT);
5404
5405 }
5406
5407 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5408 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5409 }
5410
5411 /*
5412 * wm_gmii_statchg: [mii interface function]
5413 *
5414 * Callback from MII layer when media changes.
5415 */
5416 static void
5417 wm_gmii_statchg(device_t self)
5418 {
5419 struct wm_softc *sc = device_private(self);
5420 struct mii_data *mii = &sc->sc_mii;
5421
5422 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5423 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5424 sc->sc_fcrtl &= ~FCRTL_XONE;
5425
5426 /*
5427 * Get flow control negotiation result.
5428 */
5429 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5430 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5431 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5432 mii->mii_media_active &= ~IFM_ETH_FMASK;
5433 }
5434
5435 if (sc->sc_flowflags & IFM_FLOW) {
5436 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5437 sc->sc_ctrl |= CTRL_TFCE;
5438 sc->sc_fcrtl |= FCRTL_XONE;
5439 }
5440 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5441 sc->sc_ctrl |= CTRL_RFCE;
5442 }
5443
5444 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5445 DPRINTF(WM_DEBUG_LINK,
5446 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5447 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5448 } else {
5449 DPRINTF(WM_DEBUG_LINK,
5450 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5451 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5452 }
5453
5454 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5455 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5456 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5457 : WMREG_FCRTL, sc->sc_fcrtl);
5458 if (sc->sc_type == WM_T_80003) {
5459 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5460 case IFM_1000_T:
5461 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5462 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5463 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5464 break;
5465 default:
5466 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5467 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5468 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5469 break;
5470 }
5471 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5472 }
5473 }
5474
5475 /*
5476 * wm_kmrn_readreg:
5477 *
5478 * Read a kumeran register
5479 */
5480 static int
5481 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5482 {
5483 int rv;
5484
5485 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5486 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5487 aprint_error_dev(sc->sc_dev,
5488 "%s: failed to get semaphore\n", __func__);
5489 return 0;
5490 }
5491 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5492 if (wm_get_swfwhw_semaphore(sc)) {
5493 aprint_error_dev(sc->sc_dev,
5494 "%s: failed to get semaphore\n", __func__);
5495 return 0;
5496 }
5497 }
5498
5499 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5500 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5501 KUMCTRLSTA_REN);
5502 delay(2);
5503
5504 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5505
5506 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5507 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5508 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5509 wm_put_swfwhw_semaphore(sc);
5510
5511 return (rv);
5512 }
5513
5514 /*
5515 * wm_kmrn_writereg:
5516 *
5517 * Write a kumeran register
5518 */
5519 static void
5520 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5521 {
5522
5523 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5524 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5525 aprint_error_dev(sc->sc_dev,
5526 "%s: failed to get semaphore\n", __func__);
5527 return;
5528 }
5529 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5530 if (wm_get_swfwhw_semaphore(sc)) {
5531 aprint_error_dev(sc->sc_dev,
5532 "%s: failed to get semaphore\n", __func__);
5533 return;
5534 }
5535 }
5536
5537 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5538 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5539 (val & KUMCTRLSTA_MASK));
5540
5541 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5542 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5543 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5544 wm_put_swfwhw_semaphore(sc);
5545 }
5546
5547 static int
5548 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5549 {
5550 uint32_t eecd = 0;
5551
5552 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5553 || sc->sc_type == WM_T_82583) {
5554 eecd = CSR_READ(sc, WMREG_EECD);
5555
5556 /* Isolate bits 15 & 16 */
5557 eecd = ((eecd >> 15) & 0x03);
5558
5559 /* If both bits are set, device is Flash type */
5560 if (eecd == 0x03)
5561 return 0;
5562 }
5563 return 1;
5564 }
5565
5566 static int
5567 wm_get_swsm_semaphore(struct wm_softc *sc)
5568 {
5569 int32_t timeout;
5570 uint32_t swsm;
5571
5572 /* Get the FW semaphore. */
5573 timeout = 1000 + 1; /* XXX */
5574 while (timeout) {
5575 swsm = CSR_READ(sc, WMREG_SWSM);
5576 swsm |= SWSM_SWESMBI;
5577 CSR_WRITE(sc, WMREG_SWSM, swsm);
5578 /* if we managed to set the bit we got the semaphore. */
5579 swsm = CSR_READ(sc, WMREG_SWSM);
5580 if (swsm & SWSM_SWESMBI)
5581 break;
5582
5583 delay(50);
5584 timeout--;
5585 }
5586
5587 if (timeout == 0) {
5588 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5589 /* Release semaphores */
5590 wm_put_swsm_semaphore(sc);
5591 return 1;
5592 }
5593 return 0;
5594 }
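
/*
 * Note on the acquire loop above: SWSM_SWESMBI is arbitrated by
 * hardware, so ownership is proven by writing the bit and reading it
 * back -- if firmware already holds the semaphore the write does not
 * stick, the readback is 0, and we retry for up to roughly
 * 1000 * 50us = 50ms.
 */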
5595
5596 static void
5597 wm_put_swsm_semaphore(struct wm_softc *sc)
5598 {
5599 uint32_t swsm;
5600
5601 swsm = CSR_READ(sc, WMREG_SWSM);
5602 swsm &= ~(SWSM_SWESMBI);
5603 CSR_WRITE(sc, WMREG_SWSM, swsm);
5604 }
5605
5606 static int
5607 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5608 {
5609 uint32_t swfw_sync;
5610 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5611 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5612 int timeout = 200;
5613
5614 	for (timeout = 0; timeout < 200; timeout++) {
5615 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5616 if (wm_get_swsm_semaphore(sc)) {
5617 aprint_error_dev(sc->sc_dev,
5618 "%s: failed to get semaphore\n",
5619 __func__);
5620 return 1;
5621 }
5622 }
5623 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5624 if ((swfw_sync & (swmask | fwmask)) == 0) {
5625 swfw_sync |= swmask;
5626 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5627 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5628 wm_put_swsm_semaphore(sc);
5629 return 0;
5630 }
5631 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5632 wm_put_swsm_semaphore(sc);
5633 delay(5000);
5634 }
5635 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5636 device_xname(sc->sc_dev), mask, swfw_sync);
5637 return 1;
5638 }
5639
5640 static void
5641 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5642 {
5643 uint32_t swfw_sync;
5644
5645 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5646 while (wm_get_swsm_semaphore(sc) != 0)
5647 continue;
5648 }
5649 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5650 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5651 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5652 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5653 wm_put_swsm_semaphore(sc);
5654 }
5655
5656 static int
5657 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5658 {
5659 uint32_t ext_ctrl;
5660 int timeout = 200;
5661
5662 	for (timeout = 0; timeout < 200; timeout++) {
5663 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5664 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5665 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5666
5667 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5668 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5669 return 0;
5670 delay(5000);
5671 }
5672 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5673 device_xname(sc->sc_dev), ext_ctrl);
5674 return 1;
5675 }
5676
5677 static void
5678 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5679 {
5680 uint32_t ext_ctrl;
5681 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5682 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5683 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5684 }
5685
5686 static int
5687 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5688 {
5689 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5690 uint8_t bank_high_byte;
5691 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5692
5693 if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
5694 /* Value of bit 22 corresponds to the flash bank we're on. */
5695 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5696 } else {
5697 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5698 if ((bank_high_byte & 0xc0) == 0x80)
5699 *bank = 0;
5700 else {
5701 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5702 &bank_high_byte);
5703 if ((bank_high_byte & 0xc0) == 0x80)
5704 *bank = 1;
5705 else {
5706 aprint_error_dev(sc->sc_dev,
5707 "EEPROM not present\n");
5708 return -1;
5709 }
5710 }
5711 }
5712
5713 return 0;
5714 }
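
/*
 * Note: on ICH10 and PCH the valid bank is identified by a signature
 * byte -- bits 7:6 equal to 10b mark a valid bank.  Bank 0 is checked
 * first, then bank 1 at sc_ich8_flash_bank_size words (i.e.
 * bank1_offset bytes) further into the flash.
 */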
5715
5716 /******************************************************************************
5717 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5718 * register.
5719 *
5720 * sc - Struct containing variables accessed by shared code
5721 * offset - offset of word in the EEPROM to read
5722 * data - word read from the EEPROM
5723 * words - number of words to read
5724 *****************************************************************************/
5725 static int
5726 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5727 {
5728 int32_t error = 0;
5729 uint32_t flash_bank = 0;
5730 uint32_t act_offset = 0;
5731 uint32_t bank_offset = 0;
5732 uint16_t word = 0;
5733 uint16_t i = 0;
5734
5735 /* We need to know which is the valid flash bank. In the event
5736 * that we didn't allocate eeprom_shadow_ram, we may not be
5737 * managing flash_bank. So it cannot be trusted and needs
5738 * to be updated with each read.
5739 */
5740 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5741 if (error) {
5742 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5743 __func__);
5744 return error;
5745 }
5746
5747 	/* If we're on bank 1, skip past bank 0; the bank size is in words, the offset in bytes */
5748 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5749
5750 error = wm_get_swfwhw_semaphore(sc);
5751 if (error) {
5752 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5753 __func__);
5754 return error;
5755 }
5756
5757 for (i = 0; i < words; i++) {
5758 /* The NVM part needs a byte offset, hence * 2 */
5759 act_offset = bank_offset + ((offset + i) * 2);
5760 error = wm_read_ich8_word(sc, act_offset, &word);
5761 if (error) {
5762 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5763 __func__);
5764 break;
5765 }
5766 data[i] = word;
5767 }
5768
5769 wm_put_swfwhw_semaphore(sc);
5770 return error;
5771 }
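/*
 * A minimal usage sketch (illustration only; the word offset shown is
 * hypothetical): read two consecutive words starting at word offset 0x10.
 */
#if 0
	uint16_t buf[2];

	if (wm_read_eeprom_ich8(sc, 0x10, 2, buf) == 0) {
		/* buf[0] and buf[1] now hold EEPROM words 0x10 and 0x11 */
	}
#endif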
5772
5773 /******************************************************************************
5774 * This function does initial flash setup so that a new read/write/erase cycle
5775 * can be started.
5776 *
5777 * sc - The pointer to the hw structure
5778 ****************************************************************************/
5779 static int32_t
5780 wm_ich8_cycle_init(struct wm_softc *sc)
5781 {
5782 uint16_t hsfsts;
5783 int32_t error = 1;
5784 int32_t i = 0;
5785
5786 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5787
	/* The Flash Descriptor Valid bit must be set before we can proceed */
	if ((hsfsts & HSFSTS_FLDVAL) == 0)
		return error;
5792
	/* Clear FCERR and DAEL in HW status by writing 1s */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5796
5797 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5798
	/*
	 * Either a hardware "cycle in progress" bit must be available to
	 * check against before starting a new cycle, or FDONE must read as
	 * 1 after a hardware reset so it can indicate whether a cycle is in
	 * progress or has completed.  We should also have some software
	 * semaphore mechanism guarding FDONE and the cycle-in-progress bit
	 * so that two threads cannot start a cycle at the same time.
	 */
5807
	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start
		 * one.  Begin by setting Flash Cycle Done.
		 */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/*
		 * Otherwise poll for a while so the current cycle has a
		 * chance to end before giving up.
		 */
5817 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5818 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5819 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5820 error = 0;
5821 break;
5822 }
5823 delay(1);
5824 }
		if (error == 0) {
			/*
			 * The previous cycle completed in time; now set
			 * Flash Cycle Done.
			 */
5828 hsfsts |= HSFSTS_DONE;
5829 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5830 }
5831 }
5832 return error;
5833 }
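/*
 * For orientation, a full read cycle built on wm_ich8_cycle_init() follows
 * the sequence sketched below; wm_read_ich8_data() further down is the real
 * implementation (the variable names here are illustrative):
 */
#if 0
	(void)wm_ich8_cycle_init(sc);	/* clear errors, claim the cycle */
	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
	hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
	hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
	ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
	(void)wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
	flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
#endif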
5834
5835 /******************************************************************************
5836 * This function starts a flash cycle and waits for its completion
5837 *
5838 * sc - The pointer to the hw structure
5839 ****************************************************************************/
5840 static int32_t
5841 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5842 {
5843 uint16_t hsflctl;
5844 uint16_t hsfsts;
5845 int32_t error = 1;
5846 uint32_t i = 0;
5847
5848 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5849 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5850 hsflctl |= HSFCTL_GO;
5851 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5852
	/* Wait until the FDONE bit is set */
	do {
		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
		if (hsfsts & HSFSTS_DONE)
			break;
		delay(1);
		i++;
	} while (i < timeout);
	/* Test against zero: HSFSTS_DONE need not be bit 0 */
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
		error = 0;
5864 return error;
5865 }
5866
5867 /******************************************************************************
5868 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5869 *
5870 * sc - The pointer to the hw structure
5871 * index - The index of the byte or word to read.
5872 * size - Size of data to read, 1=byte 2=word
5873 * data - Pointer to the word to store the value read.
5874 *****************************************************************************/
5875 static int32_t
5876 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5877 uint32_t size, uint16_t* data)
5878 {
5879 uint16_t hsfsts;
5880 uint16_t hsflctl;
5881 uint32_t flash_linear_address;
5882 uint32_t flash_data = 0;
5883 int32_t error = 1;
5884 int32_t count = 0;
5885
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;
5889
5890 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5891 sc->sc_ich8_flash_base;
5892
5893 do {
5894 delay(1);
5895 /* Steps */
5896 error = wm_ich8_cycle_init(sc);
5897 if (error)
5898 break;
5899
5900 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5901 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5902 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5903 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5904 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5905
		/*
		 * Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register.
		 * TODO: maybe check the index against the size of the flash.
		 */
5909
5910 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5911
5912 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5913
		/*
		 * If the cycle succeeded, read (shift in) Flash Data0, least
		 * significant byte first.  If FCERR is set instead, clear it
		 * and retry the whole sequence a few more times.
		 */
5917 if (error == 0) {
5918 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5919 if (size == 1) {
5920 *data = (uint8_t)(flash_data & 0x000000FF);
5921 } else if (size == 2) {
5922 *data = (uint16_t)(flash_data & 0x0000FFFF);
5923 }
5924 break;
5925 } else {
5926 /* If we've gotten here, then things are probably completely hosed,
5927 * but if the error condition is detected, it won't hurt to give
5928 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5929 */
5930 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5931 if (hsfsts & HSFSTS_ERR) {
5932 /* Repeat for some time before giving up. */
5933 continue;
5934 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5935 break;
5936 }
5937 }
5938 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5939
5940 return error;
5941 }
5942
5943 /******************************************************************************
5944 * Reads a single byte from the NVM using the ICH8 flash access registers.
5945 *
5946 * sc - pointer to wm_hw structure
5947 * index - The index of the byte to read.
5948 * data - Pointer to a byte to store the value read.
5949 *****************************************************************************/
5950 static int32_t
5951 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5952 {
5953 int32_t status;
5954 uint16_t word = 0;
5955
5956 status = wm_read_ich8_data(sc, index, 1, &word);
5957 if (status == 0) {
5958 *data = (uint8_t)word;
5959 }
5960
5961 return status;
5962 }
5963
5964 /******************************************************************************
5965 * Reads a word from the NVM using the ICH8 flash access registers.
5966 *
5967 * sc - pointer to wm_hw structure
5968 * index - The starting byte index of the word to read.
5969 * data - Pointer to a word to store the value read.
5970 *****************************************************************************/
5971 static int32_t
5972 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5973 {
5974 int32_t status;
5975
5976 status = wm_read_ich8_data(sc, index, 2, data);
5977 return status;
5978 }
5979
5980 static int
5981 wm_check_mng_mode(struct wm_softc *sc)
5982 {
5983 int rv;
5984
5985 switch (sc->sc_type) {
5986 case WM_T_ICH8:
5987 case WM_T_ICH9:
5988 case WM_T_ICH10:
5989 case WM_T_PCH:
5990 rv = wm_check_mng_mode_ich8lan(sc);
5991 break;
5992 case WM_T_82574:
5993 case WM_T_82583:
5994 rv = wm_check_mng_mode_82574(sc);
5995 break;
5996 case WM_T_82571:
5997 case WM_T_82572:
5998 case WM_T_82573:
5999 case WM_T_80003:
6000 rv = wm_check_mng_mode_generic(sc);
6001 break;
6002 default:
		/* nothing to do */
6004 rv = 0;
6005 break;
6006 }
6007
6008 return rv;
6009 }
6010
6011 static int
6012 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
6013 {
6014 uint32_t fwsm;
6015
6016 fwsm = CSR_READ(sc, WMREG_FWSM);
6017
6018 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
6019 return 1;
6020
6021 return 0;
6022 }
6023
6024 static int
6025 wm_check_mng_mode_82574(struct wm_softc *sc)
6026 {
6027 uint16_t data;
6028
6029 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
6030
6031 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
6032 return 1;
6033
6034 return 0;
6035 }
6036
6037 static int
6038 wm_check_mng_mode_generic(struct wm_softc *sc)
6039 {
6040 uint32_t fwsm;
6041
6042 fwsm = CSR_READ(sc, WMREG_FWSM);
6043
6044 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
6045 return 1;
6046
6047 return 0;
6048 }
6049
6050 static int
6051 wm_check_reset_block(struct wm_softc *sc)
6052 {
6053 uint32_t reg;
6054
6055 switch (sc->sc_type) {
6056 case WM_T_ICH8:
6057 case WM_T_ICH9:
6058 case WM_T_ICH10:
6059 case WM_T_PCH:
		reg = CSR_READ(sc, WMREG_FWSM);
		if ((reg & FWSM_RSPCIPHY) != 0)
			return 0;
		else
			return -1;
6066 case WM_T_82571:
6067 case WM_T_82572:
6068 case WM_T_82573:
6069 case WM_T_82574:
6070 case WM_T_82583:
6071 case WM_T_80003:
		reg = CSR_READ(sc, WMREG_MANC);
		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
			return -1;
		else
			return 0;
6078 default:
6079 /* no problem */
6080 break;
6081 }
6082
6083 return 0;
6084 }
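/*
 * A usage sketch (illustration only): callers should skip a PHY reset when
 * the function above reports that firmware is blocking it.
 */
#if 0
	if (wm_check_reset_block(sc) == 0) {
		/* safe to reset the PHY */
	} else {
		/* firmware owns the PHY; do not reset it */
	}
#endif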
6085
6086 static void
6087 wm_get_hw_control(struct wm_softc *sc)
6088 {
6089 uint32_t reg;
6090
6091 switch (sc->sc_type) {
6092 case WM_T_82573:
6093 #if 0
6094 case WM_T_82574:
6095 case WM_T_82583:
		/*
		 * FreeBSD's em driver has a function for the 82574 that
		 * checks the management mode, but it's never called.  Why?
		 */
6100 #endif
6101 reg = CSR_READ(sc, WMREG_SWSM);
6102 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
6103 break;
6104 case WM_T_82571:
6105 case WM_T_82572:
6106 case WM_T_80003:
6107 case WM_T_ICH8:
6108 case WM_T_ICH9:
6109 case WM_T_ICH10:
6110 case WM_T_PCH:
6111 reg = CSR_READ(sc, WMREG_CTRL_EXT);
6112 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
6113 break;
6114 default:
6115 break;
6116 }
6117 }
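/*
 * The symmetric release (clearing DRV_LOAD when the driver detaches) would
 * look like the sketch below; this is an illustration under the assumption
 * that a matching wm_release_hw_control() exists, not a quote of it.
 */
#if 0
	reg = CSR_READ(sc, WMREG_CTRL_EXT);
	CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
#endif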
6118
6119 /* XXX Currently TBI only */
6120 static int
6121 wm_check_for_link(struct wm_softc *sc)
6122 {
6123 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6124 uint32_t rxcw;
6125 uint32_t ctrl;
6126 uint32_t status;
6127 uint32_t sig;
6128
6129 rxcw = CSR_READ(sc, WMREG_RXCW);
6130 ctrl = CSR_READ(sc, WMREG_CTRL);
6131 status = CSR_READ(sc, WMREG_STATUS);
6132
6133 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
6134
6135 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
6136 device_xname(sc->sc_dev), __func__,
6137 ((ctrl & CTRL_SWDPIN(1)) == sig),
6138 ((status & STATUS_LU) != 0),
6139 ((rxcw & RXCW_C) != 0)
6140 ));
6141
6142 /*
6143 * SWDPIN LU RXCW
6144 * 0 0 0
6145 * 0 0 1 (should not happen)
6146 * 0 1 0 (should not happen)
6147 * 0 1 1 (should not happen)
6148 * 1 0 0 Disable autonego and force linkup
6149 * 1 0 1 got /C/ but not linkup yet
6150 * 1 1 0 (linkup)
6151 * 1 1 1 If IFM_AUTO, back to autonego
6152 *
6153 */
6154 if (((ctrl & CTRL_SWDPIN(1)) == sig)
6155 && ((status & STATUS_LU) == 0)
6156 && ((rxcw & RXCW_C) == 0)) {
6157 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
6158 __func__));
6159 sc->sc_tbi_linkup = 0;
6160 /* Disable auto-negotiation in the TXCW register */
6161 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
6162
		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: the hardware updates TFCE and RFCE in CTRL
		 * automatically, so base sc->sc_ctrl on the value just
		 * read back.
		 */
6169 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
6170 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
6172 && ((rxcw & RXCW_C) != 0)
6173 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
6174 sc->sc_tbi_linkup = 1;
6175 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
6176 __func__));
6177 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6178 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
6179 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
6180 && ((rxcw & RXCW_C) != 0)) {
6181 DPRINTF(WM_DEBUG_LINK, ("/C/"));
6182 } else {
6183 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
6184 status));
6185 }
6186
6187 return 0;
6188 }
6189