if_wm.c revision 1.187 1 /* $NetBSD: if_wm.c,v 1.187 2010/01/05 10:02:01 msaitoh Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*******************************************************************************
39
40 Copyright (c) 2001-2005, Intel Corporation
41 All rights reserved.
42
43 Redistribution and use in source and binary forms, with or without
44 modification, are permitted provided that the following conditions are met:
45
46 1. Redistributions of source code must retain the above copyright notice,
47 this list of conditions and the following disclaimer.
48
49 2. Redistributions in binary form must reproduce the above copyright
50 notice, this list of conditions and the following disclaimer in the
51 documentation and/or other materials provided with the distribution.
52
53 3. Neither the name of the Intel Corporation nor the names of its
54 contributors may be used to endorse or promote products derived from
55 this software without specific prior written permission.
56
57 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67 POSSIBILITY OF SUCH DAMAGE.
68
69 *******************************************************************************/
70 /*
71 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72 *
73 * TODO (in order of importance):
74 *
75 * - Rework how parameters are loaded from the EEPROM.
76 */
77
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.187 2010/01/05 10:02:01 msaitoh Exp $");
80
81 #include "bpfilter.h"
82 #include "rnd.h"
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/callout.h>
87 #include <sys/mbuf.h>
88 #include <sys/malloc.h>
89 #include <sys/kernel.h>
90 #include <sys/socket.h>
91 #include <sys/ioctl.h>
92 #include <sys/errno.h>
93 #include <sys/device.h>
94 #include <sys/queue.h>
95 #include <sys/syslog.h>
96
97 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
98
99 #if NRND > 0
100 #include <sys/rnd.h>
101 #endif
102
103 #include <net/if.h>
104 #include <net/if_dl.h>
105 #include <net/if_media.h>
106 #include <net/if_ether.h>
107
108 #if NBPFILTER > 0
109 #include <net/bpf.h>
110 #endif
111
112 #include <netinet/in.h> /* XXX for struct ip */
113 #include <netinet/in_systm.h> /* XXX for struct ip */
114 #include <netinet/ip.h> /* XXX for struct ip */
115 #include <netinet/ip6.h> /* XXX for struct ip6_hdr */
116 #include <netinet/tcp.h> /* XXX for struct tcphdr */
117
118 #include <sys/bus.h>
119 #include <sys/intr.h>
120 #include <machine/endian.h>
121
122 #include <dev/mii/mii.h>
123 #include <dev/mii/miivar.h>
124 #include <dev/mii/mii_bitbang.h>
125 #include <dev/mii/ikphyreg.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130
131 #include <dev/pci/if_wmreg.h>
132 #include <dev/pci/if_wmvar.h>
133
134 #ifdef WM_DEBUG
135 #define WM_DEBUG_LINK 0x01
136 #define WM_DEBUG_TX 0x02
137 #define WM_DEBUG_RX 0x04
138 #define WM_DEBUG_GMII 0x08
139 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;
140
141 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
142 #else
143 #define DPRINTF(x, y) /* nothing */
144 #endif /* WM_DEBUG */
145
146 /*
147 * Transmit descriptor list size. Due to errata, we can only have
148 * 256 hardware descriptors in the ring on < 82544, but we use 4096
149 * on >= 82544. We tell the upper layers that they can queue a lot
150 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
151 * of them at a time.
152 *
153 * We allow up to 256 (!) DMA segments per packet. Pathological packet
154 * chains containing many small mbufs have been observed in zero-copy
155 * situations with jumbo frames.
156 */
157 #define WM_NTXSEGS 256
158 #define WM_IFQUEUELEN 256
159 #define WM_TXQUEUELEN_MAX 64
160 #define WM_TXQUEUELEN_MAX_82547 16
161 #define WM_TXQUEUELEN(sc) ((sc)->sc_txnum)
162 #define WM_TXQUEUELEN_MASK(sc) (WM_TXQUEUELEN(sc) - 1)
163 #define WM_TXQUEUE_GC(sc) (WM_TXQUEUELEN(sc) / 8)
164 #define WM_NTXDESC_82542 256
165 #define WM_NTXDESC_82544 4096
166 #define WM_NTXDESC(sc) ((sc)->sc_ntxdesc)
167 #define WM_NTXDESC_MASK(sc) (WM_NTXDESC(sc) - 1)
168 #define WM_TXDESCSIZE(sc) (WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
169 #define WM_NEXTTX(sc, x) (((x) + 1) & WM_NTXDESC_MASK(sc))
170 #define WM_NEXTTXS(sc, x) (((x) + 1) & WM_TXQUEUELEN_MASK(sc))
171
172 #define WM_MAXTXDMA round_page(IP_MAXPACKET) /* for TSO */
173
174 /*
175 * Receive descriptor list size. We have one Rx buffer for normal
176 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
177 * packet. We allocate 256 receive descriptors, each with a 2k
178 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
179 */
180 #define WM_NRXDESC 256
181 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
182 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
183 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
184
185 /*
186 * Control structures are DMA'd to the i82542 chip. We allocate them in
187 * a single clump that maps to a single DMA segment to make several things
188 * easier.
189 */
/*
 * DMA control-data clump for >= 82544 chips: all descriptor rings live
 * in one contiguous allocation mapped by a single DMA segment, so the
 * WM_CDOFF()/WM_CDTXOFF()/WM_CDRXOFF() macros can compute device
 * addresses as simple offsets from sc_cddma.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};
202
/*
 * Smaller control-data layout for < 82544 chips, which (due to errata)
 * support only WM_NTXDESC_82542 (256) Tx descriptors.  Field order must
 * match wm_control_data_82544 (Rx ring first) so the WM_CDRXOFF()
 * offsets, taken from the 82544 layout, remain valid.
 */
struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};
207
208 #define WM_CDOFF(x) offsetof(struct wm_control_data_82544, x)
209 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
210 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
211
212 /*
213 * Software state for transmit jobs.
214 */
/*
 * Software state for transmit jobs.  One wm_txsoft tracks a single
 * outgoing packet (mbuf chain + its DMA map) and remembers which span
 * of hardware descriptors the packet occupies so completion processing
 * can reclaim them.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
222
223 /*
224 * Software state for receive buffers. Each descriptor gets a
225 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
226 * more than one buffer, we chain them together.
227 */
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
232
233 #define WM_LINKUP_TIMEOUT 50
234
235 /*
236 * Software state per device.
237 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;	/* PCI chipset tag */
	pcitag_t sc_pcitag;		/* PCI tag of this device */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Rx payload alignment fudge used by WM_INIT_RXDESC(): 2 to
	 * 4-byte-align the post-Ethernet-header payload, 0 when the
	 * frame would not fit in (2K - 2) — see the macro's comment.
	 */
	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define sc_txdescs sc_control_data->wcd_txdescs
#define sc_rxdescs sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding current oversized packet */
	int sc_rxlen;			/* running length of pending Rx chain */
	struct mbuf *sc_rxhead;		/* head of pending Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of pending Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* &next ptr of tail, for appending */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;		/* ICH8 NVM flash region base */
	int sc_ich8_flash_bank_size;	/* ICH8 NVM flash bank size */
};
369
370 #define WM_RXCHAIN_RESET(sc) \
371 do { \
372 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
373 *(sc)->sc_rxtailp = NULL; \
374 (sc)->sc_rxlen = 0; \
375 } while (/*CONSTCOND*/0)
376
377 #define WM_RXCHAIN_LINK(sc, m) \
378 do { \
379 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
380 (sc)->sc_rxtailp = &(m)->m_next; \
381 } while (/*CONSTCOND*/0)
382
383 #ifdef WM_EVENT_COUNTERS
384 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
385 #define WM_EVCNT_ADD(ev, val) (ev)->ev_count += (val)
386 #else
387 #define WM_EVCNT_INCR(ev) /* nothing */
388 #define WM_EVCNT_ADD(ev, val) /* nothing */
389 #endif
390
391 #define CSR_READ(sc, reg) \
392 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
393 #define CSR_WRITE(sc, reg, val) \
394 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
395 #define CSR_WRITE_FLUSH(sc) \
396 (void) CSR_READ((sc), WMREG_STATUS)
397
398 #define ICH8_FLASH_READ32(sc, reg) \
399 bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
400 #define ICH8_FLASH_WRITE32(sc, reg, data) \
401 bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
402
403 #define ICH8_FLASH_READ16(sc, reg) \
404 bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
405 #define ICH8_FLASH_WRITE16(sc, reg, data) \
406 bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
407
408 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
409 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
410
411 #define WM_CDTXADDR_LO(sc, x) (WM_CDTXADDR((sc), (x)) & 0xffffffffU)
412 #define WM_CDTXADDR_HI(sc, x) \
413 (sizeof(bus_addr_t) == 8 ? \
414 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
415
416 #define WM_CDRXADDR_LO(sc, x) (WM_CDRXADDR((sc), (x)) & 0xffffffffU)
417 #define WM_CDRXADDR_HI(sc, x) \
418 (sizeof(bus_addr_t) == 8 ? \
419 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
420
421 #define WM_CDTXSYNC(sc, x, n, ops) \
422 do { \
423 int __x, __n; \
424 \
425 __x = (x); \
426 __n = (n); \
427 \
428 /* If it will wrap around, sync to the end of the ring. */ \
429 if ((__x + __n) > WM_NTXDESC(sc)) { \
430 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
431 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
432 (WM_NTXDESC(sc) - __x), (ops)); \
433 __n -= (WM_NTXDESC(sc) - __x); \
434 __x = 0; \
435 } \
436 \
437 /* Now sync whatever is left. */ \
438 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
439 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
440 } while (/*CONSTCOND*/0)
441
442 #define WM_CDRXSYNC(sc, x, ops) \
443 do { \
444 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
445 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
446 } while (/*CONSTCOND*/0)
447
448 #define WM_INIT_RXDESC(sc, x) \
449 do { \
450 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
451 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
452 struct mbuf *__m = __rxs->rxs_mbuf; \
453 \
454 /* \
455 * Note: We scoot the packet forward 2 bytes in the buffer \
456 * so that the payload after the Ethernet header is aligned \
457 * to a 4-byte boundary. \
458 * \
459 * XXX BRAINDAMAGE ALERT! \
460 * The stupid chip uses the same size for every buffer, which \
461 * is set in the Receive Control register. We are using the 2K \
462 * size option, but what we REALLY want is (2K - 2)! For this \
463 * reason, we can't "scoot" packets longer than the standard \
464 * Ethernet MTU. On strict-alignment platforms, if the total \
465 * size exceeds (2K - 2) we set align_tweak to 0 and let \
466 * the upper layer copy the headers. \
467 */ \
468 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
469 \
470 wm_set_dma_addr(&__rxd->wrx_addr, \
471 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
472 __rxd->wrx_len = 0; \
473 __rxd->wrx_cksum = 0; \
474 __rxd->wrx_status = 0; \
475 __rxd->wrx_errors = 0; \
476 __rxd->wrx_special = 0; \
477 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
478 \
479 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
480 } while (/*CONSTCOND*/0)
481
482 static void wm_start(struct ifnet *);
483 static void wm_watchdog(struct ifnet *);
484 static int wm_ioctl(struct ifnet *, u_long, void *);
485 static int wm_init(struct ifnet *);
486 static void wm_stop(struct ifnet *, int);
487
488 static void wm_reset(struct wm_softc *);
489 static void wm_rxdrain(struct wm_softc *);
490 static int wm_add_rxbuf(struct wm_softc *, int);
491 static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
492 static int wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
493 static int wm_validate_eeprom_checksum(struct wm_softc *);
494 static void wm_tick(void *);
495
496 static void wm_set_filter(struct wm_softc *);
497
498 static int wm_intr(void *);
499 static void wm_txintr(struct wm_softc *);
500 static void wm_rxintr(struct wm_softc *);
501 static void wm_linkintr(struct wm_softc *, uint32_t);
502
503 static void wm_tbi_mediainit(struct wm_softc *);
504 static int wm_tbi_mediachange(struct ifnet *);
505 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
506
507 static void wm_tbi_set_linkled(struct wm_softc *);
508 static void wm_tbi_check_link(struct wm_softc *);
509
510 static void wm_gmii_reset(struct wm_softc *);
511
512 static int wm_gmii_i82543_readreg(device_t, int, int);
513 static void wm_gmii_i82543_writereg(device_t, int, int, int);
514
515 static int wm_gmii_i82544_readreg(device_t, int, int);
516 static void wm_gmii_i82544_writereg(device_t, int, int, int);
517
518 static int wm_gmii_i80003_readreg(device_t, int, int);
519 static void wm_gmii_i80003_writereg(device_t, int, int, int);
520
521 static int wm_gmii_bm_readreg(device_t, int, int);
522 static void wm_gmii_bm_writereg(device_t, int, int, int);
523
524 static void wm_gmii_statchg(device_t);
525
526 static void wm_gmii_mediainit(struct wm_softc *);
527 static int wm_gmii_mediachange(struct ifnet *);
528 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
529
530 static int wm_kmrn_readreg(struct wm_softc *, int);
531 static void wm_kmrn_writereg(struct wm_softc *, int, int);
532
533 static void wm_set_spiaddrsize(struct wm_softc *);
534 static int wm_match(device_t, cfdata_t, void *);
535 static void wm_attach(device_t, device_t, void *);
536 static int wm_is_onboard_nvm_eeprom(struct wm_softc *);
537 static void wm_get_auto_rd_done(struct wm_softc *);
538 static int wm_get_swsm_semaphore(struct wm_softc *);
539 static void wm_put_swsm_semaphore(struct wm_softc *);
540 static int wm_poll_eerd_eewr_done(struct wm_softc *, int);
541 static int wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
542 static void wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
543 static int wm_get_swfwhw_semaphore(struct wm_softc *);
544 static void wm_put_swfwhw_semaphore(struct wm_softc *);
545
546 static int wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
547 static int32_t wm_ich8_cycle_init(struct wm_softc *);
548 static int32_t wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
549 static int32_t wm_read_ich8_data(struct wm_softc *, uint32_t,
550 uint32_t, uint16_t *);
551 static int32_t wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
552 static int32_t wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
553 static void wm_82547_txfifo_stall(void *);
554 static int wm_check_mng_mode(struct wm_softc *);
555 static int wm_check_mng_mode_ich8lan(struct wm_softc *);
556 static int wm_check_mng_mode_82574(struct wm_softc *);
557 static int wm_check_mng_mode_generic(struct wm_softc *);
558 static void wm_get_hw_control(struct wm_softc *);
559 static int wm_check_for_link(struct wm_softc *);
560
561 CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
562 wm_match, wm_attach, NULL, NULL);
563
564 /*
565 * Devices supported by this driver.
566 */
/*
 * Match table: one entry per supported PCI ID.  wm_lookup() scans this
 * array linearly; the all-zero entry (wmp_name == NULL) terminates it.
 * wmp_type selects per-chip behavior; wmp_flags records the media type.
 */
static const struct wm_product {
	pci_vendor_id_t wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t wmp_product;	/* PCI product ID */
	const char *wmp_name;		/* device description */
	wm_chip_type wmp_type;		/* chip type */
	int wmp_flags;			/* media flags, see below */
#define WMP_F_1000X 0x01		/* 1000BASE-X (fiber/TBI) */
#define WMP_F_1000T 0x02		/* 1000BASE-T (copper) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	/*
	 * NOTE(review): name says 1000baseX (fiber) but the flag is
	 * WMP_F_1000T — verify against the media-init logic whether
	 * this should be WMP_F_1000X.
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },	/* terminator (wmp_name == NULL) */
};
856
857 #ifdef WM_EVENT_COUNTERS
858 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
859 #endif /* WM_EVENT_COUNTERS */
860
#if 0 /* Not currently used */
/*
 * wm_io_read:
 *
 *	Read a device register via the I/O-mapped indirect access
 *	window: write the register offset at I/O offset 0, then read
 *	the register's value back from I/O offset 4.  Only valid when
 *	the I/O BAR was mapped (WM_F_IOH_VALID).
 */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif
870
/*
 * wm_io_write:
 *
 *	Write a device register via the I/O-mapped indirect access
 *	window: the register offset goes to I/O offset 0, the value to
 *	I/O offset 4.  The two writes must stay in this order.  Only
 *	valid when the I/O BAR was mapped (WM_F_IOH_VALID).
 */
static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
878
879 static inline void
880 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
881 {
882 wa->wa_low = htole32(v & 0xffffffffU);
883 if (sizeof(bus_addr_t) == 8)
884 wa->wa_high = htole32((uint64_t) v >> 32);
885 else
886 wa->wa_high = 0;
887 }
888
889 static void
890 wm_set_spiaddrsize(struct wm_softc *sc)
891 {
892 uint32_t reg;
893
894 sc->sc_flags |= WM_F_EEPROM_SPI;
895 reg = CSR_READ(sc, WMREG_EECD);
896 sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
897 }
898
899 static const struct wm_product *
900 wm_lookup(const struct pci_attach_args *pa)
901 {
902 const struct wm_product *wmp;
903
904 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
905 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
906 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
907 return (wmp);
908 }
909 return (NULL);
910 }
911
912 static int
913 wm_match(device_t parent, cfdata_t cf, void *aux)
914 {
915 struct pci_attach_args *pa = aux;
916
917 if (wm_lookup(pa) != NULL)
918 return (1);
919
920 return (0);
921 }
922
/*
 * wm_attach:
 *
 *	Autoconfiguration attach routine.  Maps the device registers
 *	(and, where available, the indirect I/O window and ICH FLASH),
 *	establishes the interrupt, determines bus characteristics,
 *	allocates and loads the Tx/Rx descriptor DMA resources, reads
 *	the MAC address and configuration words from the EEPROM (or
 *	device properties), sets up media, and attaches the network
 *	interface.  On a failure after DMA resources exist, unwinds
 *	them via the fail_* labels in reverse order.
 */
static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	/* match already succeeded, so this cannot fail */
	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	/* Use the 64-bit DMA tag when the bus supports it. */
	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Chip revision, used below to distinguish i82542 steppings. */
	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped acccess,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x doesn't apparently respond when the
			 * I/O BAR is 0, which looks somewhat like it's not
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as fast
		 * a 32-bit 66MHz PCI Bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * On chips with manageability, take over the hardware from the
	 * firmware when the management mode check says so.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		/* Derive the NVM bank base/size from the gFPREG register. */
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Read twice again because some PCI-e parts fail the
		 * first check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		/* EEPROM words are little-endian 16-bit pairs of octets. */
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	/* (ifp already points here from its initializer; harmless) */
	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	/* Check for jumbo frame */
	switch (sc->sc_type) {
	case WM_T_82573:
		/* XXX limited to 9234 if ASPM is disabled */
		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* XXX limited to 9234 */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82583:
	case WM_T_ICH8:
		/* No support for jumbo frame */
		break;
	default:
		/* ETHER_MAX_LEN_JUMBO */
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
		break;
	}

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're a i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
1753
/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 *
 *	Examines the mbuf's csum_flags and, when offload (or TSO) is
 *	requested, emits one livengood TCP/IP context descriptor at
 *	sc_txnext describing where the IP and TCP/UDP checksums live.
 *	Returns the command bits in *cmdp and the per-packet checksum
 *	field bits in *fieldsp; always returns 0.  For TSO, the IP
 *	length field and TCP checksum in the packet itself are patched
 *	to the pseudo-header values the hardware expects.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* Locate the start of the IP header behind the Ethernet
	 * (possibly VLAN-encapsulated) header. */
	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* IP header length comes packed in csum_data (v4 or v6 form). */
	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;

	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
	seg = 0;
	fields = 0;

	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
		int hlen = offset + iphl;
		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;

		if (__predict_false(m0->m_len <
				    (hlen + sizeof(struct tcphdr)))) {
			/*
			 * TCP/IP headers are not in the first mbuf; we need
			 * to do this the slow and painful way.  Let's just
			 * hope this doesn't happen very often.
			 */
			struct tcphdr th;

			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);

			m_copydata(m0, hlen, sizeof(th), &th);
			if (v4) {
				struct ip ip;

				/* Zero ip_len and seed th_sum with the
				 * pseudo-header checksum, writing both
				 * back into the chain. */
				m_copydata(m0, offset, sizeof(ip), &ip);
				ip.ip_len = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip, ip_len),
				    sizeof(ip.ip_len), &ip.ip_len);
				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr ip6;

				m_copydata(m0, offset, sizeof(ip6), &ip6);
				ip6.ip6_plen = 0;
				m_copyback(m0,
				    offset + offsetof(struct ip6_hdr, ip6_plen),
				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
			    sizeof(th.th_sum), &th.th_sum);

			hlen += th.th_off << 2;
		} else {
			/*
			 * TCP/IP headers are in the first mbuf; we can do
			 * this the easy way.
			 */
			struct tcphdr *th;

			if (v4) {
				struct ip *ip =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip->ip_len = 0;
				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			} else {
				struct ip6_hdr *ip6 =
				    (void *)(mtod(m0, char *) + offset);
				th = (void *)(mtod(m0, char *) + hlen);

				ip6->ip6_plen = 0;
				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
			}
			/* hlen now covers the full L2+L3+L4 header. */
			hlen += th->th_off << 2;
		}

		if (v4) {
			WM_EVCNT_INCR(&sc->sc_ev_txtso);
			cmdlen |= WTX_TCPIP_CMD_IP;
		} else {
			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
			ipcse = 0;
		}
		cmd |= WTX_TCPIP_CMD_TSE;
		cmdlen |= WTX_TCPIP_CMD_TSE |
		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
	}

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	ipcs = WTX_TCPIP_IPCSS(offset) |
	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
	    WTX_TCPIP_IPCSE(ipcse);
	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
	}

	/* offset now points past the IP header, at the L4 header. */
	offset += iphl;

	if (m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset +
			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *)
	    &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = htole32(ipcs);
	t->tcpip_tucs = htole32(tucs);
	t->tcpip_cmdlen = htole32(cmdlen);
	t->tcpip_seg = htole32(seg);
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	/* The context descriptor consumes one Tx slot. */
	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
	txs->txs_ndesc++;

	*cmdp = cmd;
	*fieldsp = fields;

	return (0);
}
1948
1949 static void
1950 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1951 {
1952 struct mbuf *m;
1953 int i;
1954
1955 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1956 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1957 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1958 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1959 m->m_data, m->m_len, m->m_flags);
1960 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1961 i, i == 1 ? "" : "s");
1962 }
1963
/*
 * wm_82547_txfifo_stall:
 *
 *	Callout used to wait for the 82547 Tx FIFO to drain,
 *	reset the FIFO pointers, and restart packet transmission.
 *	Scheduled (once per tick) by wm_82547_txfifo_bugchk() when a
 *	packet would wrap the on-chip Tx FIFO.
 */
static void
wm_82547_txfifo_stall(void *arg)
{
	struct wm_softc *sc = arg;	/* callout argument is our softc */
	int s;

	s = splnet();

	if (sc->sc_txfifo_stall) {
		/*
		 * Drained only when the descriptor ring has caught up
		 * (TDT == TDH) and the internal FIFO head/tail and
		 * saved head/tail register pairs all agree.
		 */
		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
			/*
			 * Packets have drained.  Stop transmitter, reset
			 * FIFO pointers, restart transmitter, and kick
			 * the packet queue.
			 */
			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
			/* Disable the transmitter while rewinding the FIFO. */
			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
			/* Re-enable the transmitter and flush the writes. */
			CSR_WRITE(sc, WMREG_TCTL, tctl);
			CSR_WRITE_FLUSH(sc);

			/* Clear our software FIFO accounting and unstall. */
			sc->sc_txfifo_head = 0;
			sc->sc_txfifo_stall = 0;
			wm_start(&sc->sc_ethercom.ec_if);
		} else {
			/*
			 * Still waiting for packets to drain; try again in
			 * another tick.
			 */
			callout_schedule(&sc->sc_txfifo_ch, 1);
		}
	}

	splx(s);
}
2010
2011 /*
2012 * wm_82547_txfifo_bugchk:
2013 *
2014 * Check for bug condition in the 82547 Tx FIFO. We need to
2015 * prevent enqueueing a packet that would wrap around the end
 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2017 *
2018 * We do this by checking the amount of space before the end
2019 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
2020 * the Tx FIFO, wait for all remaining packets to drain, reset
2021 * the internal FIFO pointers to the beginning, and restart
2022 * transmission on the interface.
2023 */
2024 #define WM_FIFO_HDR 0x10
2025 #define WM_82547_PAD_LEN 0x3e0
2026 static int
2027 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2028 {
2029 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2030 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2031
2032 /* Just return if already stalled. */
2033 if (sc->sc_txfifo_stall)
2034 return (1);
2035
2036 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2037 /* Stall only occurs in half-duplex mode. */
2038 goto send_packet;
2039 }
2040
2041 if (len >= WM_82547_PAD_LEN + space) {
2042 sc->sc_txfifo_stall = 1;
2043 callout_schedule(&sc->sc_txfifo_ch, 1);
2044 return (1);
2045 }
2046
2047 send_packet:
2048 sc->sc_txfifo_head += len;
2049 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2050 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2051
2052 return (0);
2053 }
2054
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets,
 *	loads them for DMA, builds Tx descriptors (including TSO and
 *	checksum-offload context), and hands them to the chip via TDT.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct m_tag *mtag;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
	bus_addr_t curaddr;
	bus_size_t seglen, curlen;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	/* Nothing to do unless running and not already marked full. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors
	 * (used at the bottom to decide whether to arm the watchdog).
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue (peek only; commit later). */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(sc->sc_dev), m0));

		/* Get a work queue entry; reap finished jobs if short. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					device_xname(sc->sc_dev)));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		use_tso = (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;

		/*
		 * So says the Linux driver:
		 * The controller does a simple calculation to make sure
		 * there is enough room in the FIFO before initiating the
		 * DMA for each buffer.  The calc is:
		 *	4 = ceil(buffer len / MSS)
		 * To make sure we don't overrun the FIFO, adjust the max
		 * buffer len if the MSS drops.
		 */
		dmamap->dm_maxsegsz =
		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
		    ? m0->m_pkthdr.segsz << 2
		    : WTX_MAX_LEN;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				log(LOG_ERR, "%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(sc->sc_dev), error));
			break;
		}

		segs_needed = dmamap->dm_nsegs;
		if (use_tso) {
			/* For sentinel descriptor; see below. */
			segs_needed++;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to load offload context.
		 */
		if (segs_needed > sc->sc_txfree - 2) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d (%d) descriptors, have %d\n",
			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
			    segs_needed, sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/*
		 * Check for 82547 Tx FIFO bug.  We need to do this
		 * once we know we can transmit the packet, since we
		 * do some internal FIFO space accounting here.
		 */
		if (sc->sc_type == WM_T_82547 &&
		    wm_82547_txfifo_bugchk(sc, m0)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: 82547 Tx FIFO bug detected\n",
			    device_xname(sc->sc_dev)));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
			break;
		}

		/* Now actually remove the packet from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d (%d) DMA segments\n",
		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = segs_needed;

		/* Set up offload parameters for this packet. */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
			if (wm_tx_offload(sc, txs, &cksumcmd,
					  &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Every descriptor gets IDE (interrupt delay) and IFCS. */
		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs; seg++) {
			for (seglen = dmamap->dm_segs[seg].ds_len,
			     curaddr = dmamap->dm_segs[seg].ds_addr;
			     seglen != 0;
			     curaddr += curlen, seglen -= curlen,
			     nexttx = WM_NEXTTX(sc, nexttx)) {
				curlen = seglen;

				/*
				 * So says the Linux driver:
				 * Work around for premature descriptor
				 * write-backs in TSO mode.  Append a
				 * 4-byte sentinel descriptor.
				 */
				if (use_tso &&
				    seg == dmamap->dm_nsegs - 1 &&
				    curlen > 8)
					curlen -= 4;

				wm_set_dma_addr(
				    &sc->sc_txdescs[nexttx].wtx_addr,
				    curaddr);
				sc->sc_txdescs[nexttx].wtx_cmdlen =
				    htole32(cksumcmd | curlen);
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
				    0;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
				    cksumfields;
				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
				lasttx = nexttx;

				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: desc %d: low 0x%08lx, "
				     "len 0x%04x\n",
				    device_xname(sc->sc_dev), nexttx,
				    curaddr & 0xffffffffUL, (unsigned)curlen));
			}
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_RS);

		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
		}

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
		    device_xname(sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
2364
2365 /*
2366 * wm_watchdog: [ifnet interface function]
2367 *
2368 * Watchdog timer handler.
2369 */
2370 static void
2371 wm_watchdog(struct ifnet *ifp)
2372 {
2373 struct wm_softc *sc = ifp->if_softc;
2374
2375 /*
2376 * Since we're using delayed interrupts, sweep up
2377 * before we report an error.
2378 */
2379 wm_txintr(sc);
2380
2381 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2382 log(LOG_ERR,
2383 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2384 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2385 sc->sc_txnext);
2386 ifp->if_oerrors++;
2387
2388 /* Reset the interface. */
2389 (void) wm_init(ifp);
2390 }
2391
2392 /* Try to get more packets going. */
2393 wm_start(ifp);
2394 }
2395
2396 /*
2397 * wm_ioctl: [ifnet interface function]
2398 *
2399 * Handle control requests from the operator.
2400 */
2401 static int
2402 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2403 {
2404 struct wm_softc *sc = ifp->if_softc;
2405 struct ifreq *ifr = (struct ifreq *) data;
2406 struct ifaddr *ifa = (struct ifaddr *)data;
2407 struct sockaddr_dl *sdl;
2408 int diff, s, error;
2409
2410 s = splnet();
2411
2412 switch (cmd) {
2413 case SIOCSIFFLAGS:
2414 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2415 break;
2416 if (ifp->if_flags & IFF_UP) {
2417 diff = (ifp->if_flags ^ sc->sc_if_flags)
2418 & (IFF_PROMISC | IFF_ALLMULTI);
2419 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2420 /*
2421 * If the difference bettween last flag and
2422 * new flag is only IFF_PROMISC or
2423 * IFF_ALLMULTI, set multicast filter only
2424 * (don't reset to prevent link down).
2425 */
2426 wm_set_filter(sc);
2427 } else {
2428 /*
2429 * Reset the interface to pick up changes in
2430 * any other flags that affect the hardware
2431 * state.
2432 */
2433 wm_init(ifp);
2434 }
2435 } else {
2436 if (ifp->if_flags & IFF_RUNNING)
2437 wm_stop(ifp, 1);
2438 }
2439 sc->sc_if_flags = ifp->if_flags;
2440 error = 0;
2441 break;
2442 case SIOCSIFMEDIA:
2443 case SIOCGIFMEDIA:
2444 /* Flow control requires full-duplex mode. */
2445 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2446 (ifr->ifr_media & IFM_FDX) == 0)
2447 ifr->ifr_media &= ~IFM_ETH_FMASK;
2448 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2449 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2450 /* We can do both TXPAUSE and RXPAUSE. */
2451 ifr->ifr_media |=
2452 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2453 }
2454 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2455 }
2456 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2457 break;
2458 case SIOCINITIFADDR:
2459 if (ifa->ifa_addr->sa_family == AF_LINK) {
2460 sdl = satosdl(ifp->if_dl->ifa_addr);
2461 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2462 LLADDR(satosdl(ifa->ifa_addr)),
2463 ifp->if_addrlen);
2464 /* unicast address is first multicast entry */
2465 wm_set_filter(sc);
2466 error = 0;
2467 break;
2468 }
2469 /* Fall through for rest */
2470 default:
2471 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2472 break;
2473
2474 error = 0;
2475
2476 if (cmd == SIOCSIFCAP)
2477 error = (*ifp->if_init)(ifp);
2478 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2479 ;
2480 else if (ifp->if_flags & IFF_RUNNING) {
2481 /*
2482 * Multicast list has changed; set the hardware filter
2483 * accordingly.
2484 */
2485 wm_set_filter(sc);
2486 }
2487 break;
2488 }
2489
2490 /* Try to get more packets going. */
2491 wm_start(ifp);
2492
2493 splx(s);
2494 return (error);
2495 }
2496
/*
 * wm_intr:
 *
 *	Interrupt service routine.  Returns nonzero if any cause bit
 *	we care about was set (i.e. the interrupt was ours).
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int handled = 0;

	/* Loop until ICR shows none of the causes we enabled. */
	while (1 /* CONSTCOND */) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx and Tx rings are swept unconditionally each pass. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(sc->sc_dev)));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		/* Link-state related causes go to the link helper. */
		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overrun; count as an input error. */
			ifp->if_ierrors++;
#if defined(WM_DEBUG)
			log(LOG_WARNING, "%s: Receive overrun\n",
			    device_xname(sc->sc_dev));
#endif /* defined(WM_DEBUG) */
		}
	}

	if (handled) {
		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
2563
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.  Reaps completed Tx jobs,
 *	returning their descriptors and freeing their mbufs.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may free descriptors below, so clear the "full" mark. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; give the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				log(LOG_WARNING, "%s: late collision\n",
				    device_xname(sc->sc_dev));
			else if (status & WTX_ST_EC) {
				/* Excessive collisions: count 16 attempts. */
				ifp->if_collisions += 16;
				log(LOG_WARNING, "%s: excessive collisions\n",
				    device_xname(sc->sc_dev));
			}
		} else
			ifp->if_opackets++;

		/* Return the descriptors and release the packet. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
		ifp->if_timer = 0;
}
2650
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the Rx ring, reassembles
 *	multi-descriptor frames into mbuf chains, trims the FCS, records
 *	VLAN/checksum metadata, and passes completed packets up the stack.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;
	uint16_t vlantag;

	/* Walk from our current pointer until a not-yet-done descriptor. */
	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(sc->sc_dev), i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Snapshot the descriptor fields before deciding anything. */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);
		vlantag = sc->sc_rxdescs[i].wrx_special;

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			/* Discard mode: recycle buffers until EOP is seen. */
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(sc->sc_dev), i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring, unless of
		 * course the length is zero.  Treat the latter as a
		 * failed mapping.
		 */
		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		m->m_len = len;
		sc->sc_rxlen += len;
		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			WM_RXCHAIN_LINK(sc, m);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now.  The chip is
		 * configured to include the FCS (not all chips can
		 * be configured to strip it), so we need to trim it.
		 * May need to adjust length of previous mbuf in the
		 * chain if the current mbuf is too short.
		 */
		if (m->m_len < ETHER_CRC_LEN) {
			sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
			m->m_len = 0;
		} else {
			m->m_len -= ETHER_CRC_LEN;
		}
		len = sc->sc_rxlen - ETHER_CRC_LEN;

		WM_RXCHAIN_LINK(sc, m);

		/* Terminate the chain and take ownership of its head. */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				log(LOG_WARNING, "%s: symbol error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_SEQ)
				log(LOG_WARNING, "%s: receive sequence error\n",
				    device_xname(sc->sc_dev));
			else if (errors & WRX_ER_CE)
				log(LOG_WARNING, "%s: CRC error\n",
				    device_xname(sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if ((status & WRX_ST_VP) != 0) {
			VLAN_INPUT_TAG(ifp, m,
			    le16toh(vlantag),
			    continue);
		}

		/*
		 * Set up checksum info for this packet.  Results are
		 * only inspected when IXSM ("ignore checksum") is clear.
		 */
		if ((status & WRX_ST_IXSM) == 0) {
			if (status & WRX_ST_IPCS) {
				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
				if (errors & WRX_ER_IPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_IPv4_BAD;
			}
			if (status & WRX_ST_TCPCS) {
				/*
				 * Note: we don't know if this was TCP or UDP,
				 * so we just set both bits, and expect the
				 * upper layers to deal.
				 */
				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
				m->m_pkthdr.csum_flags |=
				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
				if (errors & WRX_ER_TCPE)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
}
2858
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts (LSC/RXSEQ/RXCFG causes in icr).
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
		__func__));
	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
				device_xname(sc->sc_dev)));
			mii_tick(&sc->sc_mii);
			if (sc->sc_type == WM_T_82543) {
				int miistatus, active;

				/*
				 * With 82543, we need to force speed and
				 * duplex on the MAC equal to what the PHY
				 * speed and duplex configuration is.
				 */
				miistatus = sc->sc_mii.mii_media_status;

				if (miistatus & IFM_ACTIVE) {
					active = sc->sc_mii.mii_media_active;
					/* Clear, then re-set, speed/duplex. */
					sc->sc_ctrl &= ~(CTRL_SPEED_MASK
					    | CTRL_FD);
					switch (IFM_SUBTYPE(active)) {
					case IFM_10_T:
						sc->sc_ctrl |= CTRL_SPEED_10;
						break;
					case IFM_100_TX:
						sc->sc_ctrl |= CTRL_SPEED_100;
						break;
					case IFM_1000_T:
						sc->sc_ctrl |= CTRL_SPEED_1000;
						break;
					default:
						/*
						 * fiber?
						 * Should not enter here.
						 */
						printf("unknown media (%x)\n",
						    active);
						break;
					}
					if (active & IFM_FDX)
						sc->sc_ctrl |= CTRL_FD;
					CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				}
			}
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
				device_xname(sc->sc_dev)));
		}
		return;
	}

	/* No MII PHY: TBI path below, driven by the STATUS register. */
	status = CSR_READ(sc, WMREG_STATUS);
	if (icr & ICR_LSC) {
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */

			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Recompute collision distance and XON for duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (sc->sc_ctrl & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_nrxcfg++;
		wm_check_for_link(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(sc->sc_dev)));
	}
}
2971
2972 /*
2973 * wm_tick:
2974 *
2975 * One second timer, used to check link status, sweep up
2976 * completed transmit jobs, etc.
2977 */
2978 static void
2979 wm_tick(void *arg)
2980 {
2981 struct wm_softc *sc = arg;
2982 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2983 int s;
2984
2985 s = splnet();
2986
2987 if (sc->sc_type >= WM_T_82542_2_1) {
2988 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2989 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2990 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2991 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2992 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2993 }
2994
2995 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2996 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2997
2998 if (sc->sc_flags & WM_F_HAS_MII)
2999 mii_tick(&sc->sc_mii);
3000 else
3001 wm_tbi_check_link(sc);
3002
3003 splx(s);
3004
3005 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3006 }
3007
3008 /*
3009 * wm_reset:
3010 *
3011 * Reset the i82542 chip.
3012 */
3013 static void
3014 wm_reset(struct wm_softc *sc)
3015 {
3016 uint32_t reg;
3017
3018 /*
3019 * Allocate on-chip memory according to the MTU size.
3020 * The Packet Buffer Allocation register must be written
3021 * before the chip is reset.
3022 */
3023 switch (sc->sc_type) {
3024 case WM_T_82547:
3025 case WM_T_82547_2:
3026 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3027 PBA_22K : PBA_30K;
3028 sc->sc_txfifo_head = 0;
3029 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3030 sc->sc_txfifo_size =
3031 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3032 sc->sc_txfifo_stall = 0;
3033 break;
3034 case WM_T_82571:
3035 case WM_T_82572:
3036 case WM_T_80003:
3037 sc->sc_pba = PBA_32K;
3038 break;
3039 case WM_T_82573:
3040 sc->sc_pba = PBA_12K;
3041 break;
3042 case WM_T_82574:
3043 case WM_T_82583:
3044 sc->sc_pba = PBA_20K;
3045 break;
3046 case WM_T_ICH8:
3047 sc->sc_pba = PBA_8K;
3048 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3049 break;
3050 case WM_T_ICH9:
3051 case WM_T_ICH10:
3052 sc->sc_pba = PBA_10K;
3053 break;
3054 default:
3055 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3056 PBA_40K : PBA_48K;
3057 break;
3058 }
3059 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3060
3061 if (sc->sc_flags & WM_F_PCIE) {
3062 int timeout = 800;
3063
3064 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3065 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3066
3067 while (timeout--) {
3068 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3069 break;
3070 delay(100);
3071 }
3072 }
3073
3074 /* clear interrupt */
3075 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3076
3077 /*
3078 * 82541 Errata 29? & 82547 Errata 28?
3079 * See also the description about PHY_RST bit in CTRL register
3080 * in 8254x_GBe_SDM.pdf.
3081 */
3082 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3083 CSR_WRITE(sc, WMREG_CTRL,
3084 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3085 delay(5000);
3086 }
3087
3088 switch (sc->sc_type) {
3089 case WM_T_82544:
3090 case WM_T_82540:
3091 case WM_T_82545:
3092 case WM_T_82546:
3093 case WM_T_82541:
3094 case WM_T_82541_2:
3095 /*
3096 * On some chipsets, a reset through a memory-mapped write
3097 * cycle can cause the chip to reset before completing the
3098 * write cycle. This causes major headache that can be
3099 * avoided by issuing the reset via indirect register writes
3100 * through I/O space.
3101 *
3102 * So, if we successfully mapped the I/O BAR at attach time,
3103 * use that. Otherwise, try our luck with a memory-mapped
3104 * reset.
3105 */
3106 if (sc->sc_flags & WM_F_IOH_VALID)
3107 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3108 else
3109 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3110 break;
3111
3112 case WM_T_82545_3:
3113 case WM_T_82546_3:
3114 /* Use the shadow control register on these chips. */
3115 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3116 break;
3117
3118 case WM_T_ICH8:
3119 case WM_T_ICH9:
3120 case WM_T_ICH10:
3121 wm_get_swfwhw_semaphore(sc);
3122 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3123 delay(10000);
3124
3125 default:
3126 /* Everything else can safely use the documented method. */
3127 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3128 break;
3129 }
3130 delay(10000);
3131
3132 /* reload EEPROM */
3133 switch(sc->sc_type) {
3134 case WM_T_82542_2_0:
3135 case WM_T_82542_2_1:
3136 case WM_T_82543:
3137 case WM_T_82544:
3138 delay(10);
3139 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3140 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3141 delay(2000);
3142 break;
3143 case WM_T_82541:
3144 case WM_T_82541_2:
3145 case WM_T_82547:
3146 case WM_T_82547_2:
3147 delay(20000);
3148 break;
3149 case WM_T_82573:
3150 case WM_T_82574:
3151 case WM_T_82583:
3152 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3153 delay(10);
3154 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3155 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3156 }
3157 /* FALLTHROUGH */
3158 default:
3159 /* check EECD_EE_AUTORD */
3160 wm_get_auto_rd_done(sc);
3161 }
3162
3163 /* reload sc_ctrl */
3164 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3165
3166 #if 0
3167 for (i = 0; i < 1000; i++) {
3168 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3169 return;
3170 }
3171 delay(20);
3172 }
3173
3174 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3175 log(LOG_ERR, "%s: reset failed to complete\n",
3176 device_xname(sc->sc_dev));
3177 #endif
3178 }
3179
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops any pending I/O, resets the chip, then programs the Tx/Rx
 *	descriptor rings, flow control, checksum offload, interrupt
 *	moderation, and the receive filter.  Returns 0 on success or an
 *	errno if Rx buffer allocation or media selection fails.
 */
static int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
#ifdef __NO_STRICT_ALIGNMENT
	sc->sc_align_tweak = 0;
#else
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;
	else
		sc->sc_align_tweak = 2;
#endif /* __NO_STRICT_ALIGNMENT */

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* update statistics before reset (the reset clears the counters) */
	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/*
	 * On parts with manageability, take ownership of the hardware
	 * from the firmware when management mode is active.
	 */
	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC(sc);
	sc->sc_txnext = 0;

	/* Pre-82543 parts use the "old" register layout. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN(sc);
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* Second receive queue is unused on the old layout. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD);	/* ITR/4 */
		CSR_WRITE(sc, WMREG_RADV, 375);		/* MUST be same */
	}
	/* Populate the Rx ring, re-using mbufs that survived wm_stop(). */
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				log(LOG_ERR, "%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    device_xname(sc->sc_dev), i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
	    && (sc->sc_type != WM_T_ICH10)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
	}

	sc->sc_fcrtl = FCRTL_DFLT;
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
	} else {
		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
	}

	if (sc->sc_type == WM_T_80003)
		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
	else
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);

	/* Deal with VLAN enables. */
	if (VLAN_ATTACHED(&sc->sc_ethercom))
		sc->sc_ctrl |= CTRL_VME;
	else
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	if (sc->sc_flags & WM_F_HAS_MII) {
		int val;

		switch (sc->sc_type) {
		case WM_T_80003:
		case WM_T_ICH8:
		case WM_T_ICH9:
		case WM_T_ICH10:
			/*
			 * Set the mac to wait the maximum time between each
			 * iteration and increase the max iterations when
			 * polling the phy; this fixes erroneous timeouts at
			 * 10Mbps.
			 */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
			    0xFFFF);
			val = wm_kmrn_readreg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM);
			val |= 0x3F;
			wm_kmrn_writereg(sc,
			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
			break;
		default:
			break;
		}

		if (sc->sc_type == WM_T_80003) {
			val = CSR_READ(sc, WMREG_CTRL_EXT);
			val &= ~CTRL_EXT_LINK_MODE_MASK;
			CSR_WRITE(sc, WMREG_CTRL_EXT, val);

			/* Bypass RX and TX FIFO's */
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
			    KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);

			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
		}
	}
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
		reg |= RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/* Reset TBI's RXCFG count */
	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	/* TBI (fiber) parts also need the RXCFG interrupt. */
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		reg = CSR_READ(sc, WMREG_KABGTXD);
		reg |= KABGTXD_BGSQLBIAS;
		CSR_WRITE(sc, WMREG_KABGTXD, reg);
	}

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

	if (sc->sc_type >= WM_T_82543) {
		/*
		 * Set up the interrupt throttling register (units of 256ns)
		 * Note that a footnote in Intel's documentation says this
		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
		 * or 10Mbit mode.  Empirically, it appears to be the case
		 * that that is also true for the 1024ns units of the other
		 * interrupt-related timer registers -- so, really, we ought
		 * to divide this value by 4 when the link speed is low.
		 *
		 * XXX implement this division at link speed change!
		 */

		/*
		 * For N interrupts/sec, set this value to:
		 * 1000000000 / (N * 256).  Note that we set the
		 * absolute and packet timer values to this value
		 * divided by 4 to get "simple timer" behavior.
		 */

		sc->sc_itr = 1500;		/* 2604 ints/sec */
		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
	}

	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
	    | TCTL_CT(TX_COLLISION_THRESHOLD)
	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	if (sc->sc_type >= WM_T_82571)
		sc->sc_tctl |= TCTL_MULR;
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	if (sc->sc_type == WM_T_80003) {
		reg = CSR_READ(sc, WMREG_TCTL_EXT);
		reg &= ~TCTL_EXT_GCEX_MASK;
		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
	}

	/* Set the media. */
	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
		goto out;

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
	    | RCTL_MO(sc->sc_mchash_type);

	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
	    && (ifp->if_mtu > ETHERMTU))
		sc->sc_rctl |= RCTL_LPE;

	/* Tell the chip the Rx buffer size (derived from MCLBYTES). */
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		if (sc->sc_type >= WM_T_82543) {
			switch(MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		log(LOG_ERR, "%s: interface not running\n",
		    device_xname(sc->sc_dev));
	return (error);
}
3553
3554 /*
3555 * wm_rxdrain:
3556 *
3557 * Drain the receive queue.
3558 */
3559 static void
3560 wm_rxdrain(struct wm_softc *sc)
3561 {
3562 struct wm_rxsoft *rxs;
3563 int i;
3564
3565 for (i = 0; i < WM_NRXDESC; i++) {
3566 rxs = &sc->sc_rxsoft[i];
3567 if (rxs->rxs_mbuf != NULL) {
3568 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3569 m_freem(rxs->rxs_mbuf);
3570 rxs->rxs_mbuf = NULL;
3571 }
3572 }
3573 }
3574
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 *
 *	Halts the timers, the MII, the Tx/Rx engines, and interrupt
 *	delivery, then frees any queued transmit mbufs.  If "disable"
 *	is set, the receive buffers are drained as well.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	/* Stop the 82547 Tx FIFO stall check timer. */
	if (sc->sc_type == WM_T_82547)
		callout_stop(&sc->sc_txfifo_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	} else {
#if 0
		/* Should we clear PHY's status properly? */
		wm_reset(sc);
#endif
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/*
	 * Clear the interrupt mask to ensure the device cannot assert its
	 * interrupt line.
	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
	 * any currently pending or shared interrupt.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = 0;

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/* Rx buffers are kept unless the caller asks for a full disable. */
	if (disable)
		wm_rxdrain(sc);
}
3634
3635 void
3636 wm_get_auto_rd_done(struct wm_softc *sc)
3637 {
3638 int i;
3639
3640 /* wait for eeprom to reload */
3641 switch (sc->sc_type) {
3642 case WM_T_82571:
3643 case WM_T_82572:
3644 case WM_T_82573:
3645 case WM_T_82574:
3646 case WM_T_82583:
3647 case WM_T_80003:
3648 case WM_T_ICH8:
3649 case WM_T_ICH9:
3650 case WM_T_ICH10:
3651 for (i = 10; i > 0; i--) {
3652 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3653 break;
3654 delay(1000);
3655 }
3656 if (i == 0) {
3657 log(LOG_ERR, "%s: auto read from eeprom failed to "
3658 "complete\n", device_xname(sc->sc_dev));
3659 }
3660 break;
3661 default:
3662 delay(5000);
3663 break;
3664 }
3665
3666 /* Phy configuration starts after EECD_AUTO_RD is set */
3667 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
3668 || sc->sc_type == WM_T_82574)
3669 delay(25000);
3670 }
3671
/*
 * wm_acquire_eeprom:
 *
 *	Perform the EEPROM handshake required on some chips.
 *
 *	First takes whichever software/firmware semaphore the part
 *	uses (SWFWHW, SWFW, or plain SWSM), then, on parts that need
 *	it, requests the EEPROM via EECD_EE_REQ and waits for the
 *	grant.  Returns 0 on success, 1 on failure (with any acquired
 *	semaphore released).
 */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;
	int ret = 0;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return 0;

	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
		ret = wm_get_swfwhw_semaphore(sc);
	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
		/* this will also do wm_get_swsm_semaphore() if needed */
		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		ret = wm_get_swsm_semaphore(sc);
	}

	if (ret) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 1;
	}

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted (up to ~5ms). */
		for (x = 0; x < 1000; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not acquire EEPROM GNT\n");
			/* Withdraw our request and drop the semaphore
			 * (release order mirrors wm_release_eeprom()). */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return (1);
		}
	}

	return (0);
}
3734
/*
 * wm_release_eeprom:
 *
 *	Release the EEPROM mutex.
 *
 *	Undoes wm_acquire_eeprom(): drops the EECD request bit on
 *	handshake parts, then releases whichever semaphore flavor was
 *	taken.  No-op for flash-backed (non-EEPROM) parts.
 */
static void
wm_release_eeprom(struct wm_softc *sc)
{
	uint32_t reg;

	/* always success */
	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
		return;

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);
		reg &= ~EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);
	}

	/*
	 * NOTE(review): the SWFWHW test is deliberately not chained
	 * with the SWFW/SWSM "else if" pair, matching the error path
	 * in wm_acquire_eeprom().
	 */
	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
3762
/*
 * wm_eeprom_sendbits:
 *
 *	Send a series of bits to the EEPROM.
 *
 *	Bit-bangs "nbits" bits of "bits", MSB first, on the EECD DI
 *	line, pulsing SK high then low for each bit (2us per phase).
 */
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
	uint32_t reg;
	int x;

	reg = CSR_READ(sc, WMREG_EECD);

	for (x = nbits; x > 0; x--) {
		/* Drive DI with the current (most significant) bit. */
		if (bits & (1U << (x - 1)))
			reg |= EECD_DI;
		else
			reg &= ~EECD_DI;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
		/* Clock the bit in on the rising edge of SK. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
}
3789
/*
 * wm_eeprom_recvbits:
 *
 *	Receive a series of bits from the EEPROM.
 *
 *	Bit-bangs "nbits" bits, MSB first, by pulsing SK and sampling
 *	the EECD DO line after each rising edge.  The assembled value
 *	is returned through *valp.
 */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	/* DI must be low while we clock data out of the device. */
	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
	*valp = val;
}
3814
/*
 * wm_read_eeprom_uwire:
 *
 *	Read a word from the EEPROM using the MicroWire protocol.
 *
 *	Reads "wordcnt" 16-bit words starting at "word" into "data[]",
 *	issuing a separate READ command (with chip select cycled) per
 *	word.  Always returns 0.
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}
3854
/*
 * wm_spi_eeprom_ready:
 *
 *	Wait for a SPI EEPROM to be ready for commands.
 *
 *	Polls the SPI status register (RDSR) every 5us until the busy
 *	bit clears or SPI_MAX_RETRIES microseconds elapse.  Returns 0
 *	when ready, 1 on timeout (with an error logged).
 */
static int
wm_spi_eeprom_ready(struct wm_softc *sc)
{
	uint32_t val;
	int usec;

	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
		wm_eeprom_recvbits(sc, &val, 8);
		if ((val & SPI_SR_RDY) == 0)
			break;
	}
	if (usec >= SPI_MAX_RETRIES) {
		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
		return (1);
	}
	return (0);
}
3878
/*
 * wm_read_eeprom_spi:
 *
 *	Read a word from the EEPROM using the SPI protocol.
 *
 *	Reads "wordcnt" 16-bit words starting at "word" into "data[]"
 *	with a single sequential READ command; each word is byte-swapped
 *	from the device's big-endian order.  Returns 0 on success, 1 if
 *	the device never became ready.
 */
static int
wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;
	uint8_t opc;

	/* Clear SK and CS. */
	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	if (wm_spi_eeprom_ready(sc))
		return (1);

	/* Toggle CS to flush commands. */
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	/* 8-bit-address devices use an opcode bit as address bit A8. */
	opc = SPI_OPC_READ;
	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;

	wm_eeprom_sendbits(sc, opc, 8);
	/* Word address -> byte address. */
	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);

	for (i = 0; i < wordcnt; i++) {
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
	}

	/* Raise CS and clear SK. */
	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
	CSR_WRITE(sc, WMREG_EECD, reg);
	delay(2);

	return (0);
}
3924
3925 #define EEPROM_CHECKSUM 0xBABA
3926 #define EEPROM_SIZE 0x0040
3927
3928 /*
3929 * wm_validate_eeprom_checksum
3930 *
3931 * The checksum is defined as the sum of the first 64 (16 bit) words.
3932 */
3933 static int
3934 wm_validate_eeprom_checksum(struct wm_softc *sc)
3935 {
3936 uint16_t checksum;
3937 uint16_t eeprom_data;
3938 int i;
3939
3940 checksum = 0;
3941
3942 for (i = 0; i < EEPROM_SIZE; i++) {
3943 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3944 return 1;
3945 checksum += eeprom_data;
3946 }
3947
3948 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3949 return 1;
3950
3951 return 0;
3952 }
3953
/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *
 *	Acquires the EEPROM, then dispatches to the access method in
 *	priority order: ICH8/9/10 flash, EERD register, SPI, and
 *	finally MicroWire bit-banging.  Returns 0 on success, non-zero
 *	on failure or when the EEPROM content is known-invalid.
 */
static int
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	int rv;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		return 1;

	if (wm_acquire_eeprom(sc))
		return 1;

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
	else if (sc->sc_flags & WM_F_EEPROM_SPI)
		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
	else
		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);

	wm_release_eeprom(sc);
	return rv;
}
3983
3984 static int
3985 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3986 uint16_t *data)
3987 {
3988 int i, eerd = 0;
3989 int error = 0;
3990
3991 for (i = 0; i < wordcnt; i++) {
3992 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3993
3994 CSR_WRITE(sc, WMREG_EERD, eerd);
3995 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3996 if (error != 0)
3997 break;
3998
3999 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
4000 }
4001
4002 return error;
4003 }
4004
4005 static int
4006 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
4007 {
4008 uint32_t attempts = 100000;
4009 uint32_t i, reg = 0;
4010 int32_t done = -1;
4011
4012 for (i = 0; i < attempts; i++) {
4013 reg = CSR_READ(sc, rw);
4014
4015 if (reg & EERD_DONE) {
4016 done = 0;
4017 break;
4018 }
4019 delay(5);
4020 }
4021
4022 return done;
4023 }
4024
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *
 *	Allocates a cluster mbuf, loads it into the slot's DMA map
 *	(unloading any previous mapping first), and re-initializes the
 *	hardware descriptor.  Returns 0 on success or ENOBUFS if mbuf
 *	or cluster allocation fails; panics on a DMA load error.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop any stale mapping before reusing the DMA map. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* XXX XXX XXX */
		aprint_error_dev(sc->sc_dev,
		    "unable to load rx DMA map %d, error = %d\n",
		    idx, error);
		panic("wm_add_rxbuf");
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
4070
4071 /*
4072 * wm_set_ral:
4073 *
4074 * Set an entery in the receive address list.
4075 */
4076 static void
4077 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4078 {
4079 uint32_t ral_lo, ral_hi;
4080
4081 if (enaddr != NULL) {
4082 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4083 (enaddr[3] << 24);
4084 ral_hi = enaddr[4] | (enaddr[5] << 8);
4085 ral_hi |= RAL_AV;
4086 } else {
4087 ral_lo = 0;
4088 ral_hi = 0;
4089 }
4090
4091 if (sc->sc_type >= WM_T_82544) {
4092 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4093 ral_lo);
4094 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4095 ral_hi);
4096 } else {
4097 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4098 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4099 }
4100 }
4101
4102 /*
4103 * wm_mchash:
4104 *
4105 * Compute the hash of the multicast address for the 4096-bit
4106 * multicast filter.
4107 */
4108 static uint32_t
4109 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4110 {
4111 static const int lo_shift[4] = { 4, 3, 2, 0 };
4112 static const int hi_shift[4] = { 4, 5, 6, 8 };
4113 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4114 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4115 uint32_t hash;
4116
4117 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4118 || (sc->sc_type == WM_T_ICH10)) {
4119 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4120 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4121 return (hash & 0x3ff);
4122 }
4123 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4124 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4125
4126 return (hash & 0xfff);
4127 }
4128
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 *
 *	Programs broadcast/promiscuous bits, the receive address list
 *	(station address in slot 0), and the multicast hash table.
 *	Falls back to ALLMULTI for promiscuous mode or multicast
 *	address ranges.  Finishes by writing RCTL.
 */
static void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i, size;

	/* The multicast table address moved on 82544 and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_RAL_TABSIZE;
	else
		size = WM_RAL_TABSIZE;
	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
	for (i = 1; i < size; i++)
		wm_set_ral(sc, NULL, i);

	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		size = WM_ICH8_MC_TABSIZE;
	else
		size = WM_MC_TABSIZE;
	/* Clear out the multicast table. */
	for (i = 0; i < size; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* Split the hash into a table-word index and a bit index. */
		reg = (hash >> 5);
		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
		    || (sc->sc_type == WM_T_ICH10))
			reg &= 0x1f;
		else
			reg &= 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug??
		 * On the 82544, writing certain MTA words can clobber the
		 * previous word, so save and rewrite it around the update.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
4229
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 *
 *	Sets the inter-packet gap, wires the SWD link-LED/LOS pins,
 *	and registers the 1000baseSX (HDX/FDX) and auto media options
 *	with ifmedia, defaulting to auto.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	/* Autonegotiation re-check interval, in seconds. */
	sc->sc_tbi_anegticks = 5;

	/* Initialize our media structures */
	sc->sc_mii.mii_ifp = ifp;

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/* Register one media option and append its name to the attach message. */
#define	ADD(ss, mm, dd)							\
do {									\
	aprint_normal("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal_dev(sc->sc_dev, "");
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	aprint_normal("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
4283
4284 /*
4285 * wm_tbi_mediastatus: [ifmedia interface function]
4286 *
4287 * Get the current interface media status on a 1000BASE-X device.
4288 */
4289 static void
4290 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4291 {
4292 struct wm_softc *sc = ifp->if_softc;
4293 uint32_t ctrl, status;
4294
4295 ifmr->ifm_status = IFM_AVALID;
4296 ifmr->ifm_active = IFM_ETHER;
4297
4298 status = CSR_READ(sc, WMREG_STATUS);
4299 if ((status & STATUS_LU) == 0) {
4300 ifmr->ifm_active |= IFM_NONE;
4301 return;
4302 }
4303
4304 ifmr->ifm_status |= IFM_ACTIVE;
4305 ifmr->ifm_active |= IFM_1000_SX;
4306 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4307 ifmr->ifm_active |= IFM_FDX;
4308 ctrl = CSR_READ(sc, WMREG_CTRL);
4309 if (ctrl & CTRL_RFCE)
4310 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4311 if (ctrl & CTRL_TFCE)
4312 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4313 }
4314
4315 /*
4316 * wm_tbi_mediachange: [ifmedia interface function]
4317 *
4318 * Set hardware to newly-selected media on a 1000BASE-X device.
4319 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit configuration word: advertise pause
	 * capability when autonegotiating or when flow control was
	 * explicitly requested.
	 */
	sc->sc_txcw = 0;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
		sc->sc_txcw |= TXCW_ANE;
	} else {
		/*
		 * If autonegotiation is turned off, force link up and turn on
		 * full duplex
		 */
		sc->sc_txcw &= ~TXCW_ANE;
		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(1000);
	}

	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
		    device_xname(sc->sc_dev),sc->sc_txcw));
	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	/* Sample the loss-of-signal input pin. */
	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));

	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
	 * optics detect a signal, 0 if they don't.
	 */
	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
		/* Have signal; wait for the link to come up. */

		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/*
			 * Reset the link, and let autonegotiation do its thing
			 */
			sc->sc_ctrl |= CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
			sc->sc_ctrl &= ~CTRL_LRST;
			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
			delay(1000);
		}

		/* Poll for link-up, 10ms per iteration. */
		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
			    device_xname(sc->sc_dev),i));

		status = CSR_READ(sc, WMREG_STATUS);
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
			device_xname(sc->sc_dev),status, STATUS_LU));
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    device_xname(sc->sc_dev),
			    (status & STATUS_FD) ? "FDX" : "HDX"));

			/*
			 * NOTE: CTRL will update TFCE and RFCE automatically,
			 * so we should update sc->sc_ctrl
			 */
			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
			/* Set collision distance and XON per duplex mode. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			sc->sc_fcrtl &= ~FCRTL_XONE;
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
				sc->sc_fcrtl |= FCRTL_XONE;
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			/* Pre-82543 parts keep FCRTL at a different offset. */
			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
				      WMREG_OLD_FCRTL : WMREG_FCRTL,
				      sc->sc_fcrtl);
			sc->sc_tbi_linkup = 1;
		} else {
			if (i == WM_LINKUP_TIMEOUT)
				wm_check_for_link(sc);
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    device_xname(sc->sc_dev)));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
4432
4433 /*
4434 * wm_tbi_set_linkled:
4435 *
4436 * Update the link LED on 1000BASE-X devices.
4437 */
4438 static void
4439 wm_tbi_set_linkled(struct wm_softc *sc)
4440 {
4441
4442 if (sc->sc_tbi_linkup)
4443 sc->sc_ctrl |= CTRL_SWDPIN(0);
4444 else
4445 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4446
4447 /* 82540 or newer devices are active low */
4448 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4449
4450 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4451 }
4452
4453 /*
4454 * wm_tbi_check_link:
4455 *
4456 * Check the link on 1000BASE-X devices.
4457 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw, ctrl, status;

	status = CSR_READ(sc, WMREG_STATUS);

	/* rxcw/ctrl are read but currently unused beyond the read itself. */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);

	/* set link status */
	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
		sc->sc_tbi_linkup = 0;
	} else if (sc->sc_tbi_linkup == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tbi_linkup = 1;
	}

	/* Interface is up but the link is not: try to recover it. */
	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
	    && ((status & STATUS_LU) == 0)) {
		sc->sc_tbi_linkup = 0;
		/* sc_tbi_nrxcfg counters are maintained by the interrupt path. */
		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
			/* RXCFG storm! */
			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
			/* Reinitialize the chip to escape the storm. */
			wm_init(ifp);
			wm_start(ifp);
		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			/* If the timer expired, retry autonegotiation */
			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
				sc->sc_tbi_ticks = 0;
				/*
				 * Reset the link, and let autonegotiation do
				 * its thing
				 */
				sc->sc_ctrl |= CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				sc->sc_ctrl &= ~CTRL_LRST;
				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
				delay(1000);
				/* Pulse ANE off, then restore TXCW. */
				CSR_WRITE(sc, WMREG_TXCW,
				    sc->sc_txcw & ~TXCW_ANE);
				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
			}
		}
	}

	wm_tbi_set_linkled(sc);
}
4515
4516 /*
4517 * wm_gmii_reset:
4518 *
4519 * Reset the PHY.
4520 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;
	int func = 0; /* XXX gcc */

	/* ICH parts serialize PHY access through the SW/FW/HW semaphore. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		if (wm_get_swfwhw_semaphore(sc)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	}
	/* 80003 uses the per-function SW/FW semaphore instead. */
	if (sc->sc_type == WM_T_80003) {
		func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
		if (wm_get_swfw_semaphore(sc,
			func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
			aprint_error_dev(sc->sc_dev,
			    "%s: failed to get semaphore\n", __func__);
			return;
		}
	}

	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
		/* null ? */
		break;
	case WM_T_82543:
		/*
		 * With 82543, we need to force speed and duplex on the MAC
		 * equal to what the PHY speed and duplex configuration is.
		 * In addition, we need to perform a hardware reset on the PHY
		 * to take it out of reset.
		 */
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
			 CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Deassert, assert, deassert reset with settling delays. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10*1000);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(150);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
		delay(20*1000);	/* extra delay to get PHY ID? */
		break;
	case WM_T_82544:	/* reset 10000us */
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
	case WM_T_82571:	/* reset 100us */
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		/* Assert time differs: 100us for 82571+, 10ms for older. */
		delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150*1000);

		if ((sc->sc_type == WM_T_82541)
		    || (sc->sc_type == WM_T_82541_2)
		    || (sc->sc_type == WM_T_82547)
		    || (sc->sc_type == WM_T_82547_2)) {
			/* workaround for igp are done in igp_reset() */
			/* XXX add code to set LED after phy reset */
		}
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* generic reset */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(100);
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(150*1000);

		/* Allow time for h/w to get to a quiescent state afer reset */
		delay(10*1000);

		/* XXX add code to set LED after phy reset */
		break;
	default:
		panic("unknown sc_type\n");
		break;
	}

	/* Release whichever semaphore was taken above. */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10))
		wm_put_swfwhw_semaphore(sc);
	if (sc->sc_type == WM_T_80003)
		wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
4634
4635 /*
4636 * wm_gmii_mediainit:
4637 *
4638 * Initialize media for use on 1000BASE-T devices.
4639 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	/* 80003 uses a larger default transmit IPG. */
	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
	else
		sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003,
	 * the em driver only sets CTRL_SLU here - but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* Pick the MDIO access routines appropriate to the chip. */
	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		/* 82543 has no MDIC register; MDIO is bit-banged. */
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);

	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* if failed, retry with *_bm_* */
		sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;

		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_DOPAUSE);
	}
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: expose a media list with only "none". */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		if (sc->sc_type >= WM_T_82574) {
			struct mii_softc *child;

			child = LIST_FIRST(&sc->sc_mii.mii_phys);
			/* fix read/write functions as e1000 driver */
			if (device_is_a(child->mii_dev, "igphy")) {
				sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
				sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
			} else {
				sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
				sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
			}
		}

		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}
}
4715
4716 /*
4717 * wm_gmii_mediastatus: [ifmedia interface function]
4718 *
4719 * Get the current interface media status on a 1000BASE-T device.
4720 */
4721 static void
4722 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4723 {
4724 struct wm_softc *sc = ifp->if_softc;
4725
4726 ether_mediastatus(ifp, ifmr);
4727 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4728 sc->sc_flowflags;
4729 }
4730
4731 /*
4732 * wm_gmii_mediachange: [ifmedia interface function]
4733 *
4734 * Set hardware to newly-selected media on a 1000BASE-T device.
4735 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	int rc;

	/* Nothing to do until the interface is brought up. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return 0;

	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
	sc->sc_ctrl |= CTRL_SLU;
	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
	    || (sc->sc_type > WM_T_82543)) {
		/* Let the MAC follow the PHY's resolved speed/duplex. */
		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
	} else {
		/* 82543 with fixed media: force speed/duplex on the MAC. */
		sc->sc_ctrl &= ~CTRL_ASDE;
		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
		if (ife->ifm_media & IFM_FDX)
			sc->sc_ctrl |= CTRL_FD;
		switch(IFM_SUBTYPE(ife->ifm_media)) {
		case IFM_10_T:
			sc->sc_ctrl |= CTRL_SPEED_10;
			break;
		case IFM_100_TX:
			sc->sc_ctrl |= CTRL_SPEED_100;
			break;
		case IFM_1000_T:
			sc->sc_ctrl |= CTRL_SPEED_1000;
			break;
		default:
			panic("wm_gmii_mediachange: bad media 0x%x",
			    ife->ifm_media);
		}
	}
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	/* 82543 and older need a PHY reset for the change to take. */
	if (sc->sc_type <= WM_T_82543)
		wm_gmii_reset(sc);

	/* ENXIO (no PHY) is not an error for the caller. */
	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
		return 0;
	return rc;
}
4779
4780 #define MDI_IO CTRL_SWDPIN(2)
4781 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4782 #define MDI_CLK CTRL_SWDPIN(3)
4783
/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang "nbits" bits of "data" (MSB first) out the 82543's
 *	software-defined MDIO/MDC pins.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Configure both MDIO and MDC as outputs driven by the host. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	/* For each bit: set data, pulse the clock high then low. */
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
4806
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in (MSB first) from the 82543's
 *	software-defined MDIO pin, clocking MDC ourselves.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* MDC remains an output; MDI_IO is left as an input (not in DIO mask). */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One turnaround clock cycle before sampling data. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Clock in 16 data bits, sampling while MDC is high. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
4840
4841 #undef MDI_IO
4842 #undef MDI_DIR
4843 #undef MDI_CLK
4844
4845 /*
4846 * wm_gmii_i82543_readreg: [mii interface function]
4847 *
4848 * Read a PHY register on the GMII (i82543 version).
4849 */
4850 static int
4851 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4852 {
4853 struct wm_softc *sc = device_private(self);
4854 int rv;
4855
4856 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4857 i82543_mii_sendbits(sc, reg | (phy << 5) |
4858 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4859 rv = i82543_mii_recvbits(sc) & 0xffff;
4860
4861 DPRINTF(WM_DEBUG_GMII,
4862 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4863 device_xname(sc->sc_dev), phy, reg, rv));
4864
4865 return (rv);
4866 }
4867
4868 /*
4869 * wm_gmii_i82543_writereg: [mii interface function]
4870 *
4871 * Write a PHY register on the GMII (i82543 version).
4872 */
4873 static void
4874 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4875 {
4876 struct wm_softc *sc = device_private(self);
4877
4878 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4879 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4880 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4881 (MII_COMMAND_START << 30), 32);
4882 }
4883
4884 /*
4885 * wm_gmii_i82544_readreg: [mii interface function]
4886 *
4887 * Read a PHY register on the GMII.
4888 */
static int
wm_gmii_i82544_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	uint32_t mdic = 0;
	int i, rv;

	/* Kick off the read, then poll MDIC_READY (up to 320 * 10us). */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    device_xname(sc->sc_dev), phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads typically mean no device answered. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
4924
4925 /*
4926 * wm_gmii_i82544_writereg: [mii interface function]
4927 *
4928 * Write a PHY register on the GMII.
4929 */
4930 static void
4931 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4932 {
4933 struct wm_softc *sc = device_private(self);
4934 uint32_t mdic = 0;
4935 int i;
4936
4937 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4938 MDIC_REGADD(reg) | MDIC_DATA(val));
4939
4940 for (i = 0; i < 320; i++) {
4941 mdic = CSR_READ(sc, WMREG_MDIC);
4942 if (mdic & MDIC_READY)
4943 break;
4944 delay(10);
4945 }
4946
4947 if ((mdic & MDIC_READY) == 0)
4948 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4949 device_xname(sc->sc_dev), phy, reg);
4950 else if (mdic & MDIC_E)
4951 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4952 device_xname(sc->sc_dev), phy, reg);
4953 }
4954
4955 /*
4956 * wm_gmii_i80003_readreg: [mii interface function]
4957 *
4958 * Read a PHY register on the kumeran
4959 * This could be handled by the PHY layer if we didn't have to lock the
4960 * ressource ...
4961 */
static int
wm_gmii_i80003_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	/* PCI function determines which PHY semaphore protects us. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1) /* only one PHY on kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/*
	 * Select the register page first; registers at or above
	 * GG82563_MIN_ALT_REG need the alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
4993
4994 /*
4995 * wm_gmii_i80003_writereg: [mii interface function]
4996 *
4997 * Write a PHY register on the kumeran.
4998 * This could be handled by the PHY layer if we didn't have to lock the
4999 * ressource ...
5000 */
static void
wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	/* PCI function determines which PHY semaphore protects us. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1) /* only one PHY on kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/*
	 * Select the register page first; registers at or above
	 * GG82563_MIN_ALT_REG need the alternate page-select register.
	 */
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}
	/* Wait more 200us for a bug of the ready bit in the MDIC register */
	delay(200);
	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	delay(200);

	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
5030
5031 /*
5032 * wm_gmii_bm_readreg: [mii interface function]
5033 *
5034 * Read a PHY register on the kumeran
5035 * This could be handled by the PHY layer if we didn't have to lock the
5036 * ressource ...
5037 */
static int
wm_gmii_bm_readreg(device_t self, int phy, int reg)
{
	struct wm_softc *sc = device_private(self);
	/* PCI function determines which PHY semaphore protects us. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return 0;
	}

	/*
	 * Out-of-range register numbers encode a page in the upper bits;
	 * select the page before the actual access.
	 * NOTE(review): presumably register 0x1f is the BM PHY's
	 * page-select register when phy == 1 -- confirm against the
	 * e1000 shared code.
	 */
	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);

	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}
5065
5066 /*
5067 * wm_gmii_bm_writereg: [mii interface function]
5068 *
5069 * Write a PHY register on the kumeran.
5070 * This could be handled by the PHY layer if we didn't have to lock the
5071 * ressource ...
5072 */
static void
wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
{
	struct wm_softc *sc = device_private(self);
	/* PCI function determines which PHY semaphore protects us. */
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
		    __func__);
		return;
	}

	/*
	 * Out-of-range register numbers encode a page in the upper bits;
	 * select the page before the actual access.
	 * NOTE(review): presumably register 0x1f is the BM PHY's
	 * page-select register when phy == 1 -- confirm against the
	 * e1000 shared code.
	 */
	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f,
			    reg);
		else
			wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);

	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}
5098
5099 /*
5100 * wm_gmii_statchg: [mii interface function]
5101 *
5102 * Callback from MII layer when media changes.
5103 */
static void
wm_gmii_statchg(device_t self)
{
	struct wm_softc *sc = device_private(self);
	struct mii_data *mii = &sc->sc_mii;

	/* Start from a clean flow-control / collision-distance state. */
	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/* Translate the flow-control flags into CTRL/FCRTL bits. */
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	/* Collision distance depends on duplex. */
	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	/* Pre-82543 parts keep FCRTL at a different register offset. */
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
						 : WMREG_FCRTL, sc->sc_fcrtl);
	/* 80003: adjust kumeran half-duplex control and IPG per speed. */
	if (sc->sc_type == WM_T_80003) {
		switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}
5162
5163 /*
5164 * wm_kmrn_readreg:
5165 *
5166 * Read a kumeran register
5167 */
5168 static int
5169 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5170 {
5171 int rv;
5172
5173 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5174 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5175 aprint_error_dev(sc->sc_dev,
5176 "%s: failed to get semaphore\n", __func__);
5177 return 0;
5178 }
5179 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5180 if (wm_get_swfwhw_semaphore(sc)) {
5181 aprint_error_dev(sc->sc_dev,
5182 "%s: failed to get semaphore\n", __func__);
5183 return 0;
5184 }
5185 }
5186
5187 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5188 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5189 KUMCTRLSTA_REN);
5190 delay(2);
5191
5192 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5193
5194 if (sc->sc_flags == WM_F_SWFW_SYNC)
5195 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5196 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5197 wm_put_swfwhw_semaphore(sc);
5198
5199 return (rv);
5200 }
5201
5202 /*
5203 * wm_kmrn_writereg:
5204 *
5205 * Write a kumeran register
5206 */
5207 static void
5208 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5209 {
5210
5211 if (sc->sc_flags == WM_F_SWFW_SYNC) {
5212 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5213 aprint_error_dev(sc->sc_dev,
5214 "%s: failed to get semaphore\n", __func__);
5215 return;
5216 }
5217 } else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
5218 if (wm_get_swfwhw_semaphore(sc)) {
5219 aprint_error_dev(sc->sc_dev,
5220 "%s: failed to get semaphore\n", __func__);
5221 return;
5222 }
5223 }
5224
5225 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5226 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5227 (val & KUMCTRLSTA_MASK));
5228
5229 if (sc->sc_flags == WM_F_SWFW_SYNC)
5230 wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5231 else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
5232 wm_put_swfwhw_semaphore(sc);
5233 }
5234
5235 static int
5236 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5237 {
5238 uint32_t eecd = 0;
5239
5240 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5241 || sc->sc_type == WM_T_82583) {
5242 eecd = CSR_READ(sc, WMREG_EECD);
5243
5244 /* Isolate bits 15 & 16 */
5245 eecd = ((eecd >> 15) & 0x03);
5246
5247 /* If both bits are set, device is Flash type */
5248 if (eecd == 0x03)
5249 return 0;
5250 }
5251 return 1;
5252 }
5253
5254 static int
5255 wm_get_swsm_semaphore(struct wm_softc *sc)
5256 {
5257 int32_t timeout;
5258 uint32_t swsm;
5259
5260 /* Get the FW semaphore. */
5261 timeout = 1000 + 1; /* XXX */
5262 while (timeout) {
5263 swsm = CSR_READ(sc, WMREG_SWSM);
5264 swsm |= SWSM_SWESMBI;
5265 CSR_WRITE(sc, WMREG_SWSM, swsm);
5266 /* if we managed to set the bit we got the semaphore. */
5267 swsm = CSR_READ(sc, WMREG_SWSM);
5268 if (swsm & SWSM_SWESMBI)
5269 break;
5270
5271 delay(50);
5272 timeout--;
5273 }
5274
5275 if (timeout == 0) {
5276 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5277 /* Release semaphores */
5278 wm_put_swsm_semaphore(sc);
5279 return 1;
5280 }
5281 return 0;
5282 }
5283
5284 static void
5285 wm_put_swsm_semaphore(struct wm_softc *sc)
5286 {
5287 uint32_t swsm;
5288
5289 swsm = CSR_READ(sc, WMREG_SWSM);
5290 swsm &= ~(SWSM_SWESMBI);
5291 CSR_WRITE(sc, WMREG_SWSM, swsm);
5292 }
5293
5294 static int
5295 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5296 {
5297 uint32_t swfw_sync;
5298 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5299 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5300 int timeout = 200;
5301
5302 for(timeout = 0; timeout < 200; timeout++) {
5303 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5304 if (wm_get_swsm_semaphore(sc)) {
5305 aprint_error_dev(sc->sc_dev,
5306 "%s: failed to get semaphore\n",
5307 __func__);
5308 return 1;
5309 }
5310 }
5311 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5312 if ((swfw_sync & (swmask | fwmask)) == 0) {
5313 swfw_sync |= swmask;
5314 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5315 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5316 wm_put_swsm_semaphore(sc);
5317 return 0;
5318 }
5319 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5320 wm_put_swsm_semaphore(sc);
5321 delay(5000);
5322 }
5323 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5324 device_xname(sc->sc_dev), mask, swfw_sync);
5325 return 1;
5326 }
5327
5328 static void
5329 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5330 {
5331 uint32_t swfw_sync;
5332
5333 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5334 while (wm_get_swsm_semaphore(sc) != 0)
5335 continue;
5336 }
5337 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5338 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5339 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5340 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5341 wm_put_swsm_semaphore(sc);
5342 }
5343
5344 static int
5345 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5346 {
5347 uint32_t ext_ctrl;
5348 int timeout = 200;
5349
5350 for(timeout = 0; timeout < 200; timeout++) {
5351 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5352 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5353 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5354
5355 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5356 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5357 return 0;
5358 delay(5000);
5359 }
5360 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5361 device_xname(sc->sc_dev), ext_ctrl);
5362 return 1;
5363 }
5364
5365 static void
5366 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5367 {
5368 uint32_t ext_ctrl;
5369 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5370 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5371 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5372 }
5373
5374 static int
5375 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5376 {
5377 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5378 uint8_t bank_high_byte;
5379 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5380
5381 if (sc->sc_type != WM_T_ICH10) {
5382 /* Value of bit 22 corresponds to the flash bank we're on. */
5383 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5384 } else {
5385 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5386 if ((bank_high_byte & 0xc0) == 0x80)
5387 *bank = 0;
5388 else {
5389 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5390 &bank_high_byte);
5391 if ((bank_high_byte & 0xc0) == 0x80)
5392 *bank = 1;
5393 else {
5394 aprint_error_dev(sc->sc_dev,
5395 "EEPROM not present\n");
5396 return -1;
5397 }
5398 }
5399 }
5400
5401 return 0;
5402 }
5403
5404 /******************************************************************************
5405 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5406 * register.
5407 *
5408 * sc - Struct containing variables accessed by shared code
5409 * offset - offset of word in the EEPROM to read
5410 * data - word read from the EEPROM
5411 * words - number of words to read
5412 *****************************************************************************/
5413 static int
5414 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5415 {
5416 int32_t error = 0;
5417 uint32_t flash_bank = 0;
5418 uint32_t act_offset = 0;
5419 uint32_t bank_offset = 0;
5420 uint16_t word = 0;
5421 uint16_t i = 0;
5422
5423 /* We need to know which is the valid flash bank. In the event
5424 * that we didn't allocate eeprom_shadow_ram, we may not be
5425 * managing flash_bank. So it cannot be trusted and needs
5426 * to be updated with each read.
5427 */
5428 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5429 if (error) {
5430 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5431 __func__);
5432 return error;
5433 }
5434
5435 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5436 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5437
5438 error = wm_get_swfwhw_semaphore(sc);
5439 if (error) {
5440 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5441 __func__);
5442 return error;
5443 }
5444
5445 for (i = 0; i < words; i++) {
5446 /* The NVM part needs a byte offset, hence * 2 */
5447 act_offset = bank_offset + ((offset + i) * 2);
5448 error = wm_read_ich8_word(sc, act_offset, &word);
5449 if (error) {
5450 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5451 __func__);
5452 break;
5453 }
5454 data[i] = word;
5455 }
5456
5457 wm_put_swfwhw_semaphore(sc);
5458 return error;
5459 }
5460
/******************************************************************************
 * This function does initial flash setup so that a new read/write/erase cycle
 * can be started.
 *
 * sc - The pointer to the hw structure
 *
 * Returns 0 when the controller is ready for a new cycle, 1 on error
 * (flash descriptor invalid, or a previous cycle never finished).
 ****************************************************************************/
static int32_t
wm_ich8_cycle_init(struct wm_softc *sc)
{
	uint16_t hsfsts;
	int32_t error = 1;
	int32_t i = 0;

	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);

	/* May be check the Flash Des Valid bit in Hw status */
	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
		return error;
	}

	/* Clear FCERR in Hw status by writing 1 */
	/* Clear DAEL in Hw status by writing a 1 */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;

	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);

	/* Either we should have a hardware SPI cycle in progress bit to check
	 * against, in order to start a new cycle or FDONE bit should be changed
	 * in the hardware so that it is 1 after hardware reset, which can then be
	 * used as an indication whether a cycle is in progress or has been
	 * completed .. we should also have some software semaphore mechanism to
	 * guard FDONE or the cycle in progress bit so that two threads access to
	 * those bits can be sequentialized or a way so that 2 threads don't
	 * start the cycle at the same time */

	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* There is no cycle running at present, so we can start a cycle */
		/* Begin by setting Flash Cycle Done. */
		hsfsts |= HSFSTS_DONE;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		error = 0;
	} else {
		/* otherwise poll for sometime so the current cycle has a chance
		 * to end before giving up. */
		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
				/* Previous cycle ended; we may proceed. */
				error = 0;
				break;
			}
			delay(1);
		}
		if (error == 0) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done. */
			hsfsts |= HSFSTS_DONE;
			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
		}
	}
	return error;
}
5522
5523 /******************************************************************************
5524 * This function starts a flash cycle and waits for its completion
5525 *
5526 * sc - The pointer to the hw structure
5527 ****************************************************************************/
5528 static int32_t
5529 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5530 {
5531 uint16_t hsflctl;
5532 uint16_t hsfsts;
5533 int32_t error = 1;
5534 uint32_t i = 0;
5535
5536 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5537 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5538 hsflctl |= HSFCTL_GO;
5539 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5540
5541 /* wait till FDONE bit is set to 1 */
5542 do {
5543 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5544 if (hsfsts & HSFSTS_DONE)
5545 break;
5546 delay(1);
5547 i++;
5548 } while (i < timeout);
5549 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
5550 error = 0;
5551 }
5552 return error;
5553 }
5554
/******************************************************************************
 * Reads a byte or word from the NVM using the ICH8 flash access registers.
 *
 * sc - The pointer to the hw structure
 * index - The index of the byte or word to read.
 * size - Size of data to read, 1=byte 2=word
 * data - Pointer to the word to store the value read.
 *
 * Returns 0 on success, 1 on failure (bad arguments, cycle-init failure,
 * or the flash cycle never completed after the retry budget).
 *****************************************************************************/
static int32_t
wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
    uint32_t size, uint16_t* data)
{
	uint16_t hsfsts;
	uint16_t hsflctl;
	uint32_t flash_linear_address;
	uint32_t flash_data = 0;
	int32_t error = 1;
	int32_t count = 0;

	/* Validate arguments before touching the hardware. */
	if (size < 1 || size > 2 || data == 0x0 ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;

	/* Byte address within the GbE region of the flash. */
	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
	    sc->sc_ich8_flash_base;

	do {
		delay(1);
		/* Steps */
		error = wm_ich8_cycle_init(sc);
		if (error)
			break;

		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);

		/* Write the last 24 bits of index into Flash Linear address field in
		 * Flash Address */
		/* TODO: TBD maybe check the index against the size of flash */

		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);

		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it and try the whole
		 * sequence a few more times, else read in (shift in) the Flash Data0,
		 * the order is least significant byte first msb to lsb */
		if (error == 0) {
			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
			if (size == 1) {
				*data = (uint8_t)(flash_data & 0x000000FF);
			} else if (size == 2) {
				*data = (uint16_t)(flash_data & 0x0000FFFF);
			}
			break;
		} else {
			/* If we've gotten here, then things are probably completely hosed,
			 * but if the error condition is detected, it won't hurt to give
			 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
			if (hsfsts & HSFSTS_ERR) {
				/* Repeat for some time before giving up. */
				continue;
			} else if ((hsfsts & HSFSTS_DONE) == 0) {
				/* Cycle neither errored nor completed: give up. */
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return error;
}
5630
5631 /******************************************************************************
5632 * Reads a single byte from the NVM using the ICH8 flash access registers.
5633 *
5634 * sc - pointer to wm_hw structure
5635 * index - The index of the byte to read.
5636 * data - Pointer to a byte to store the value read.
5637 *****************************************************************************/
5638 static int32_t
5639 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5640 {
5641 int32_t status;
5642 uint16_t word = 0;
5643
5644 status = wm_read_ich8_data(sc, index, 1, &word);
5645 if (status == 0) {
5646 *data = (uint8_t)word;
5647 }
5648
5649 return status;
5650 }
5651
5652 /******************************************************************************
5653 * Reads a word from the NVM using the ICH8 flash access registers.
5654 *
5655 * sc - pointer to wm_hw structure
5656 * index - The starting byte index of the word to read.
5657 * data - Pointer to a word to store the value read.
5658 *****************************************************************************/
5659 static int32_t
5660 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5661 {
5662 int32_t status;
5663
5664 status = wm_read_ich8_data(sc, index, 2, data);
5665 return status;
5666 }
5667
/*
 * wm_check_mng_mode:
 *
 *	Dispatch to the chip-specific manageability-mode check.
 *	Returns nonzero if the management engine is active, 0 otherwise
 *	(and 0 for chip types that have no manageability support).
 */
static int
wm_check_mng_mode(struct wm_softc *sc)
{
	int rv;

	switch (sc->sc_type) {
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		rv = wm_check_mng_mode_ich8lan(sc);
		break;
	case WM_T_82574:
	case WM_T_82583:
		rv = wm_check_mng_mode_82574(sc);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_80003:
		rv = wm_check_mng_mode_generic(sc);
		break;
	default:
		/* nothing to do */
		rv = 0;
		break;
	}

	return rv;
}
5697
5698 static int
5699 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5700 {
5701 uint32_t fwsm;
5702
5703 fwsm = CSR_READ(sc, WMREG_FWSM);
5704
5705 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5706 return 1;
5707
5708 return 0;
5709 }
5710
5711 static int
5712 wm_check_mng_mode_82574(struct wm_softc *sc)
5713 {
5714 uint16_t data;
5715
5716 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
5717
5718 if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
5719 return 1;
5720
5721 return 0;
5722 }
5723
5724 static int
5725 wm_check_mng_mode_generic(struct wm_softc *sc)
5726 {
5727 uint32_t fwsm;
5728
5729 fwsm = CSR_READ(sc, WMREG_FWSM);
5730
5731 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5732 return 1;
5733
5734 return 0;
5735 }
5736
/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken control of the
 *	device by setting the chip-specific "driver loaded" bit
 *	(SWSM.DRV_LOAD or CTRL_EXT.DRV_LOAD, depending on chip type).
 */
static void
wm_get_hw_control(struct wm_softc *sc)
{
	uint32_t reg;

	switch (sc->sc_type) {
	case WM_T_82573:
#if 0
	case WM_T_82574:
	case WM_T_82583:
		/*
		 * FreeBSD's em driver has the function for 82574 to check
		 * the management mode, but it's not used. Why?
		 */
#endif
		/* 82573 uses the DRV_LOAD bit in the SWSM register. */
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
		break;
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		/* These parts use DRV_LOAD in the extended control register. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
		break;
	default:
		/* Other chip types have no driver-load handshake. */
		break;
	}
}
5768
/* XXX Currently TBI only */
/*
 * wm_check_for_link:
 *
 *	Examine the signal-detect pin, link-up status and /C/ ordered
 *	sets to decide whether to force the link up or return to
 *	auto-negotiation (see the truth table in the body).
 *	Always returns 0.
 */
static int
wm_check_for_link(struct wm_softc *sc)
{
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t rxcw;
	uint32_t ctrl;
	uint32_t status;
	uint32_t sig;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	/* On chips newer than 82544 the signal-detect pin reads as 1. */
	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;

	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
		device_xname(sc->sc_dev), __func__,
		((ctrl & CTRL_SWDPIN(1)) == sig),
		((status & STATUS_LU) != 0),
		((rxcw & RXCW_C) != 0)
		    ));

	/*
	 * SWDPIN   LU  RXCW
	 *      0    0     0
	 *      0    0     1    (should not happen)
	 *      0    1     0    (should not happen)
	 *      0    1     1    (should not happen)
	 *      1    0     0    Disable autonego and force linkup
	 *      1    0     1    got /C/ but not linkup yet
	 *      1    1     0    (linkup)
	 *      1    1     1    If IFM_AUTO, back to autonego
	 *
	 */
	if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((status & STATUS_LU) == 0)
	    && ((rxcw & RXCW_C) == 0)) {
		/* Signal present but no link and no /C/: force the link. */
		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
			__func__));
		sc->sc_tbi_linkup = 0;
		/* Disable auto-negotiation in the TXCW register */
		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));

		/*
		 * Force link-up and also force full-duplex.
		 *
		 * NOTE: CTRL was updated TFCE and RFCE automatically,
		 * so we should update sc->sc_ctrl
		 */
		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if(((status & STATUS_LU) != 0)
	    && ((rxcw & RXCW_C) != 0)
	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
		/* Link up and partner sends /C/: resume auto-negotiation. */
		sc->sc_tbi_linkup = 1;
		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
			__func__));
		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
	    && ((rxcw & RXCW_C) != 0)) {
		/* Receiving /C/ but link not up yet: nothing to do. */
		DPRINTF(WM_DEBUG_LINK, ("/C/"));
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
			status));
	}

	return 0;
}
5839