/*	$NetBSD: if_wm.c,v 1.186 2010/01/05 09:31:21 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.186 2010/01/05 09:31:21 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
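
/*
 * Illustrative sketch (annotation, not original driver code): because
 * the ring and queue sizes above are powers of two, WM_NEXTTX() and
 * WM_NEXTTXS() can advance and wrap an index with a single AND rather
 * than a modulo.  For the 4096-entry ring, WM_NTXDESC_MASK(sc) is
 * 0xfff, so:
 *
 *	idx = 4095;
 *	idx = WM_NEXTTX(sc, idx);	becomes (4096 & 0xfff) == 0
 *
 * which wraps the index back to the start of the ring.  This is also
 * why sc_ntxdesc and sc_txnum must be powers of two.
 */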

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
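
/*
 * Worked example (annotation): with 2k (MCLBYTES) clusters, a
 * 9014-byte jumbo frame occupies howmany(9014, 2048) == 5 buffers,
 * so the 256-entry ring holds about 256 / 5 ~= 51 of them -- hence
 * "room for 50 jumbo packets" above.
 */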

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
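
/*
 * Illustrative sketch (annotation, not original driver code): sc_rxtailp
 * always points at the m_next slot where the next mbuf should be linked,
 * so appending to the chain is O(1) with no empty-chain special case:
 *
 *	WM_RXCHAIN_RESET(sc);		sets sc_rxtailp = &sc_rxhead
 *	WM_RXCHAIN_LINK(sc, m1);	stores through the tail pointer,
 *					i.e. sc_rxhead = m1
 *	WM_RXCHAIN_LINK(sc, m2);	now stores through &m1->m_next,
 *					i.e. m1->m_next = m2
 */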

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
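
/*
 * Usage sketch (annotation): WM_CDTXSYNC() splits the sync because the
 * DMA map is linear while the descriptor ring is circular.  With a
 * 4096-entry ring, syncing 4 descriptors starting at index 4094 turns
 * into two bus_dmamap_sync() calls, one covering entries 4094-4095 and
 * one covering entries 0-1.
 */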

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
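
/*
 * Worked example (annotation): an IPv4 packet begins with a 14-byte
 * Ethernet header, so with sc_align_tweak == 2 the IP header starts at
 * buffer offset 16, a 4-byte boundary.  With a tweak of 0 it would
 * start at offset 14 and fault on strict-alignment machines; the cost
 * of the tweak is 2 bytes of the 2K buffer, which is why it can't be
 * used for frames longer than (2K - 2).
 */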

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrsize(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
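
/*
 * Usage note (annotation): the two I/O ports form an address/data
 * window -- offset 0 selects a CSR, offset 4 carries the value -- so,
 * for example,
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *
 * would have the same effect as CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl),
 * only through I/O space instead of the memory-mapped BAR.
 */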

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
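
/*
 * Worked example (annotation): on a platform with a 64-bit bus_addr_t,
 * v == 0x123456789 stores wa_low == htole32(0x23456789) and wa_high ==
 * htole32(0x1), the little-endian split the descriptor format expects;
 * on 32-bit platforms the high word is simply 0.
 */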

static void
wm_set_spiaddrsize(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	switch (sc->sc_type) {
	case WM_T_82542_2_0:
	case WM_T_82542_2_1:
	case WM_T_82543:
	case WM_T_82544:
		/* Microwire */
		sc->sc_ee_addrbits = 6;
		break;
	case WM_T_82540:
	case WM_T_82545:
	case WM_T_82545_3:
	case WM_T_82546:
	case WM_T_82546_3:
		/* Microwire */
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82541:
	case WM_T_82541_2:
	case WM_T_82547:
	case WM_T_82547_2:
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			/* SPI */
			wm_set_spiaddrsize(sc);
		} else
			/* Microwire */
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82571:
	case WM_T_82572:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
		break;
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_82583:
		if (wm_is_onboard_nvm_eeprom(sc) == 0)
			sc->sc_flags |= WM_F_EEPROM_FLASH;
		else {
			/* SPI */
			wm_set_spiaddrsize(sc);
		}
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
		break;
	case WM_T_80003:
		/* SPI */
		wm_set_spiaddrsize(sc);
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
		break;
	case WM_T_ICH8:
	case WM_T_ICH9:
		/* Check whether EEPROM is present or not */
		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
			/* Not found */
			aprint_error_dev(sc->sc_dev,
			    "EEPROM PRESENT bit isn't set\n");
			sc->sc_flags |= WM_F_EEPROM_INVALID;
		}
		/* FALLTHROUGH */
	case WM_T_ICH10:
		/* FLASH */
		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (reg & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
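		/*
		 * Worked example (annotation): GFPREG holds the flash
		 * region base (low bits) and limit (bits 16 and up) in
		 * sector units, so the region spans (limit + 1 - base)
		 * sectors.  Dividing by 2 yields a single NVM bank, and
		 * dividing by sizeof(uint16_t) converts bytes into the
		 * 16-bit words the ICH8 read routines operate on.
		 */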
		break;
	default:
		break;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */
	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag
	 * this for later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Retry the read, because some PCI-e parts fail the
		 * first check due to the link being in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	/* Set device properties (macflags) */
	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if it was not
	 * already found in the device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
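
	/*
	 * Illustrative example (annotation): if port 0 of such a dual
	 * port MAC reads 00:aa:bb:cc:dd:ee from the EEPROM, the second
	 * function reports 00:aa:bb:cc:dd:ef -- only the low bit of
	 * the last octet differs.
	 */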

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
	    sc->sc_type != WM_T_82583 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or later, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or later (except the i82547), we can do
	 * TCP segmentation offload.
	 */
1618 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1619 ifp->if_capabilities |= IFCAP_TSOv4;
1620 }
1621
1622 if (sc->sc_type >= WM_T_82571) {
1623 ifp->if_capabilities |= IFCAP_TSOv6;
1624 }
1625
1626 /*
1627 * Attach the interface.
1628 */
1629 if_attach(ifp);
1630 ether_ifattach(ifp, enaddr);
1631 #if NRND > 0
1632 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1633 #endif
1634
1635 #ifdef WM_EVENT_COUNTERS
1636 /* Attach event counters. */
1637 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1638 NULL, xname, "txsstall");
1639 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1640 NULL, xname, "txdstall");
1641 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1642 NULL, xname, "txfifo_stall");
1643 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1644 NULL, xname, "txdw");
1645 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1646 NULL, xname, "txqe");
1647 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1648 NULL, xname, "rxintr");
1649 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1650 NULL, xname, "linkintr");
1651
1652 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1653 NULL, xname, "rxipsum");
1654 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1655 NULL, xname, "rxtusum");
1656 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1657 NULL, xname, "txipsum");
1658 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1659 NULL, xname, "txtusum");
1660 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1661 NULL, xname, "txtusum6");
1662
1663 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1664 NULL, xname, "txtso");
1665 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1666 NULL, xname, "txtso6");
1667 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1668 NULL, xname, "txtsopain");
1669
1670 for (i = 0; i < WM_NTXSEGS; i++) {
1671 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1672 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1673 NULL, xname, wm_txseg_evcnt_names[i]);
1674 }
1675
1676 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1677 NULL, xname, "txdrop");
1678
1679 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1680 NULL, xname, "tu");
1681
1682 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1683 NULL, xname, "tx_xoff");
1684 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1685 NULL, xname, "tx_xon");
1686 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1687 NULL, xname, "rx_xoff");
1688 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1689 NULL, xname, "rx_xon");
1690 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1691 NULL, xname, "rx_macctl");
1692 #endif /* WM_EVENT_COUNTERS */
1693
1694 if (pmf_device_register(self, NULL, NULL))
1695 pmf_class_network_register(self, ifp);
1696 else
1697 aprint_error_dev(self, "couldn't establish power handler\n");
1698
1699 return;
1700
1701 /*
1702 * Free any resources we've allocated during the failed attach
1703 * attempt. Do this in reverse order and fall through.
1704 */
1705 fail_5:
1706 for (i = 0; i < WM_NRXDESC; i++) {
1707 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1708 bus_dmamap_destroy(sc->sc_dmat,
1709 sc->sc_rxsoft[i].rxs_dmamap);
1710 }
1711 fail_4:
1712 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1713 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1714 bus_dmamap_destroy(sc->sc_dmat,
1715 sc->sc_txsoft[i].txs_dmamap);
1716 }
1717 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1718 fail_3:
1719 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1720 fail_2:
1721 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1722 cdata_size);
1723 fail_1:
1724 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1725 fail_0:
1726 return;
1727 }
1728
1729 /*
1730 * wm_tx_offload:
1731 *
1732 * Set up TCP/IP checksumming parameters for the
1733 * specified packet.
1734 */
1735 static int
1736 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1737 uint8_t *fieldsp)
1738 {
1739 struct mbuf *m0 = txs->txs_mbuf;
1740 struct livengood_tcpip_ctxdesc *t;
1741 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1742 uint32_t ipcse;
1743 struct ether_header *eh;
1744 int offset, iphl;
1745 uint8_t fields;
1746
1747 /*
1748 * XXX It would be nice if the mbuf pkthdr had offset
1749 * fields for the protocol headers.
1750 */
1751
1752 eh = mtod(m0, struct ether_header *);
1753 switch (htons(eh->ether_type)) {
1754 case ETHERTYPE_IP:
1755 case ETHERTYPE_IPV6:
1756 offset = ETHER_HDR_LEN;
1757 break;
1758
1759 case ETHERTYPE_VLAN:
1760 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1761 break;
1762
1763 default:
1764 /*
1765 * Don't support this protocol or encapsulation.
1766 */
1767 *fieldsp = 0;
1768 *cmdp = 0;
1769 return (0);
1770 }
1771
1772 if ((m0->m_pkthdr.csum_flags &
1773 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1774 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1775 } else {
1776 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1777 }
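	/*
	 * IPCSE is the inclusive offset of the last byte covered by
	 * the IP header checksum.
	 */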
1778 ipcse = offset + iphl - 1;
1779
1780 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1781 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1782 seg = 0;
1783 fields = 0;
1784
1785 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1786 int hlen = offset + iphl;
1787 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1788
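		/*
		 * For TSO the chip rewrites the TCP checksum in every
		 * segment, but it expects th_sum to be seeded with the
		 * pseudo-header checksum computed over a zero length;
		 * that is what the code below sets up.
		 */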
1789 if (__predict_false(m0->m_len <
1790 (hlen + sizeof(struct tcphdr)))) {
1791 /*
1792 * TCP/IP headers are not in the first mbuf; we need
1793 * to do this the slow and painful way. Let's just
1794 * hope this doesn't happen very often.
1795 */
1796 struct tcphdr th;
1797
1798 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1799
1800 m_copydata(m0, hlen, sizeof(th), &th);
1801 if (v4) {
1802 struct ip ip;
1803
1804 m_copydata(m0, offset, sizeof(ip), &ip);
1805 ip.ip_len = 0;
1806 m_copyback(m0,
1807 offset + offsetof(struct ip, ip_len),
1808 sizeof(ip.ip_len), &ip.ip_len);
1809 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1810 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1811 } else {
1812 struct ip6_hdr ip6;
1813
1814 m_copydata(m0, offset, sizeof(ip6), &ip6);
1815 ip6.ip6_plen = 0;
1816 m_copyback(m0,
1817 offset + offsetof(struct ip6_hdr, ip6_plen),
1818 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1819 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1820 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1821 }
1822 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1823 sizeof(th.th_sum), &th.th_sum);
1824
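			/* th_off counts 32-bit words; shift left by 2 to get bytes. */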
1825 hlen += th.th_off << 2;
1826 } else {
1827 /*
1828 * TCP/IP headers are in the first mbuf; we can do
1829 * this the easy way.
1830 */
1831 struct tcphdr *th;
1832
1833 if (v4) {
1834 struct ip *ip =
1835 (void *)(mtod(m0, char *) + offset);
1836 th = (void *)(mtod(m0, char *) + hlen);
1837
1838 ip->ip_len = 0;
1839 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1840 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1841 } else {
1842 struct ip6_hdr *ip6 =
1843 (void *)(mtod(m0, char *) + offset);
1844 th = (void *)(mtod(m0, char *) + hlen);
1845
1846 ip6->ip6_plen = 0;
1847 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1848 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1849 }
1850 hlen += th->th_off << 2;
1851 }
1852
1853 if (v4) {
1854 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1855 cmdlen |= WTX_TCPIP_CMD_IP;
1856 } else {
1857 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1858 ipcse = 0;
1859 }
1860 cmd |= WTX_TCPIP_CMD_TSE;
1861 cmdlen |= WTX_TCPIP_CMD_TSE |
1862 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1863 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1864 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1865 }
1866
1867 /*
1868 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1869 * offload feature, if we load the context descriptor, we
1870 * MUST provide valid values for IPCSS and TUCSS fields.
1871 */
1872
1873 ipcs = WTX_TCPIP_IPCSS(offset) |
1874 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1875 WTX_TCPIP_IPCSE(ipcse);
1876 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1877 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1878 fields |= WTX_IXSM;
1879 }
1880
1881 offset += iphl;
1882
1883 if (m0->m_pkthdr.csum_flags &
1884 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1885 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1886 fields |= WTX_TXSM;
1887 tucs = WTX_TCPIP_TUCSS(offset) |
1888 WTX_TCPIP_TUCSO(offset +
1889 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1890 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1891 } else if ((m0->m_pkthdr.csum_flags &
1892 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1893 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1894 fields |= WTX_TXSM;
1895 tucs = WTX_TCPIP_TUCSS(offset) |
1896 WTX_TCPIP_TUCSO(offset +
1897 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1898 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1899 } else {
1900 /* Just initialize it to a valid TCP context. */
1901 tucs = WTX_TCPIP_TUCSS(offset) |
1902 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1903 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1904 }
1905
1906 /* Fill in the context descriptor. */
1907 t = (struct livengood_tcpip_ctxdesc *)
1908 &sc->sc_txdescs[sc->sc_txnext];
1909 t->tcpip_ipcs = htole32(ipcs);
1910 t->tcpip_tucs = htole32(tucs);
1911 t->tcpip_cmdlen = htole32(cmdlen);
1912 t->tcpip_seg = htole32(seg);
1913 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1914
1915 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1916 txs->txs_ndesc++;
1917
1918 *cmdp = cmd;
1919 *fieldsp = fields;
1920
1921 return (0);
1922 }
1923
1924 static void
1925 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1926 {
1927 struct mbuf *m;
1928 int i;
1929
1930 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1931 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1932 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1933 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1934 m->m_data, m->m_len, m->m_flags);
1935 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1936 i, i == 1 ? "" : "s");
1937 }
1938
1939 /*
1940 * wm_82547_txfifo_stall:
1941 *
1942 * Callout used to wait for the 82547 Tx FIFO to drain,
1943 * reset the FIFO pointers, and restart packet transmission.
1944 */
1945 static void
1946 wm_82547_txfifo_stall(void *arg)
1947 {
1948 struct wm_softc *sc = arg;
1949 int s;
1950
1951 s = splnet();
1952
1953 if (sc->sc_txfifo_stall) {
1954 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1955 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1956 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1957 /*
1958 * Packets have drained. Stop transmitter, reset
1959 * FIFO pointers, restart transmitter, and kick
1960 * the packet queue.
1961 */
1962 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1963 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1964 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1965 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1966 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1967 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1968 CSR_WRITE(sc, WMREG_TCTL, tctl);
1969 CSR_WRITE_FLUSH(sc);
1970
1971 sc->sc_txfifo_head = 0;
1972 sc->sc_txfifo_stall = 0;
1973 wm_start(&sc->sc_ethercom.ec_if);
1974 } else {
1975 /*
1976 * Still waiting for packets to drain; try again in
1977 * another tick.
1978 */
1979 callout_schedule(&sc->sc_txfifo_ch, 1);
1980 }
1981 }
1982
1983 splx(s);
1984 }
1985
1986 /*
1987 * wm_82547_txfifo_bugchk:
1988 *
1989 * Check for bug condition in the 82547 Tx FIFO. We need to
1990  * prevent enqueueing a packet that would wrap around the end
1991  * of the Tx FIFO ring buffer, otherwise the chip will croak.
1992 *
1993 * We do this by checking the amount of space before the end
1994 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1995 * the Tx FIFO, wait for all remaining packets to drain, reset
1996 * the internal FIFO pointers to the beginning, and restart
1997 * transmission on the interface.
1998 */
1999 #define WM_FIFO_HDR 0x10
2000 #define WM_82547_PAD_LEN 0x3e0
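/*
 * The Tx FIFO stores each packet with a 16-byte (WM_FIFO_HDR) header,
 * padded out to 16-byte granularity, hence the roundup() in
 * wm_82547_txfifo_bugchk() below.
 */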
2001 static int
2002 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2003 {
2004 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2005 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2006
2007 /* Just return if already stalled. */
2008 if (sc->sc_txfifo_stall)
2009 return (1);
2010
2011 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2012 /* Stall only occurs in half-duplex mode. */
2013 goto send_packet;
2014 }
2015
2016 if (len >= WM_82547_PAD_LEN + space) {
2017 sc->sc_txfifo_stall = 1;
2018 callout_schedule(&sc->sc_txfifo_ch, 1);
2019 return (1);
2020 }
2021
2022 send_packet:
2023 sc->sc_txfifo_head += len;
2024 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2025 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2026
2027 return (0);
2028 }
2029
2030 /*
2031 * wm_start: [ifnet interface function]
2032 *
2033 * Start packet transmission on the interface.
2034 */
2035 static void
2036 wm_start(struct ifnet *ifp)
2037 {
2038 struct wm_softc *sc = ifp->if_softc;
2039 struct mbuf *m0;
2040 struct m_tag *mtag;
2041 struct wm_txsoft *txs;
2042 bus_dmamap_t dmamap;
2043 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2044 bus_addr_t curaddr;
2045 bus_size_t seglen, curlen;
2046 uint32_t cksumcmd;
2047 uint8_t cksumfields;
2048
2049 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2050 return;
2051
2052 /*
2053 * Remember the previous number of free descriptors.
2054 */
2055 ofree = sc->sc_txfree;
2056
2057 /*
2058 * Loop through the send queue, setting up transmit descriptors
2059 * until we drain the queue, or use up all available transmit
2060 * descriptors.
2061 */
2062 for (;;) {
2063 /* Grab a packet off the queue. */
2064 IFQ_POLL(&ifp->if_snd, m0);
2065 if (m0 == NULL)
2066 break;
2067
2068 DPRINTF(WM_DEBUG_TX,
2069 ("%s: TX: have packet to transmit: %p\n",
2070 device_xname(sc->sc_dev), m0));
2071
2072 /* Get a work queue entry. */
2073 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2074 wm_txintr(sc);
2075 if (sc->sc_txsfree == 0) {
2076 DPRINTF(WM_DEBUG_TX,
2077 ("%s: TX: no free job descriptors\n",
2078 device_xname(sc->sc_dev)));
2079 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2080 break;
2081 }
2082 }
2083
2084 txs = &sc->sc_txsoft[sc->sc_txsnext];
2085 dmamap = txs->txs_dmamap;
2086
2087 use_tso = (m0->m_pkthdr.csum_flags &
2088 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2089
2090 /*
2091 * So says the Linux driver:
2092 * The controller does a simple calculation to make sure
2093 * there is enough room in the FIFO before initiating the
2094 * DMA for each buffer. The calc is:
2095 * 4 = ceil(buffer len / MSS)
2096 * To make sure we don't overrun the FIFO, adjust the max
2097 * buffer len if the MSS drops.
2098 */
2099 dmamap->dm_maxsegsz =
2100 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2101 ? m0->m_pkthdr.segsz << 2
2102 : WTX_MAX_LEN;
2103
2104 /*
2105 * Load the DMA map. If this fails, the packet either
2106 * didn't fit in the allotted number of segments, or we
2107 * were short on resources. For the too-many-segments
2108 * case, we simply report an error and drop the packet,
2109 * since we can't sanely copy a jumbo packet to a single
2110 * buffer.
2111 */
2112 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2113 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2114 if (error) {
2115 if (error == EFBIG) {
2116 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2117 log(LOG_ERR, "%s: Tx packet consumes too many "
2118 "DMA segments, dropping...\n",
2119 device_xname(sc->sc_dev));
2120 IFQ_DEQUEUE(&ifp->if_snd, m0);
2121 wm_dump_mbuf_chain(sc, m0);
2122 m_freem(m0);
2123 continue;
2124 }
2125 /*
2126 * Short on resources, just stop for now.
2127 */
2128 DPRINTF(WM_DEBUG_TX,
2129 ("%s: TX: dmamap load failed: %d\n",
2130 device_xname(sc->sc_dev), error));
2131 break;
2132 }
2133
2134 segs_needed = dmamap->dm_nsegs;
2135 if (use_tso) {
2136 /* For sentinel descriptor; see below. */
2137 segs_needed++;
2138 }
2139
2140 /*
2141 * Ensure we have enough descriptors free to describe
2142 * the packet. Note, we always reserve one descriptor
2143 * at the end of the ring due to the semantics of the
2144 * TDT register, plus one more in the event we need
2145 * to load offload context.
2146 */
2147 if (segs_needed > sc->sc_txfree - 2) {
2148 /*
2149 * Not enough free descriptors to transmit this
2150 * packet. We haven't committed anything yet,
2151 * so just unload the DMA map, put the packet
2152 			 * back on the queue, and punt. Notify the upper
2153 * layer that there are no more slots left.
2154 */
2155 DPRINTF(WM_DEBUG_TX,
2156 ("%s: TX: need %d (%d) descriptors, have %d\n",
2157 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2158 segs_needed, sc->sc_txfree - 1));
2159 ifp->if_flags |= IFF_OACTIVE;
2160 bus_dmamap_unload(sc->sc_dmat, dmamap);
2161 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2162 break;
2163 }
2164
2165 /*
2166 * Check for 82547 Tx FIFO bug. We need to do this
2167 * once we know we can transmit the packet, since we
2168 * do some internal FIFO space accounting here.
2169 */
2170 if (sc->sc_type == WM_T_82547 &&
2171 wm_82547_txfifo_bugchk(sc, m0)) {
2172 DPRINTF(WM_DEBUG_TX,
2173 ("%s: TX: 82547 Tx FIFO bug detected\n",
2174 device_xname(sc->sc_dev)));
2175 ifp->if_flags |= IFF_OACTIVE;
2176 bus_dmamap_unload(sc->sc_dmat, dmamap);
2177 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2178 break;
2179 }
2180
2181 IFQ_DEQUEUE(&ifp->if_snd, m0);
2182
2183 /*
2184 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2185 */
2186
2187 DPRINTF(WM_DEBUG_TX,
2188 ("%s: TX: packet has %d (%d) DMA segments\n",
2189 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2190
2191 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2192
2193 /*
2194 * Store a pointer to the packet so that we can free it
2195 * later.
2196 *
2197 		 * Initially, we consider the number of descriptors the
2198 		 * packet uses to be the number of DMA segments. This may be
2199 * incremented by 1 if we do checksum offload (a descriptor
2200 * is used to set the checksum context).
2201 */
2202 txs->txs_mbuf = m0;
2203 txs->txs_firstdesc = sc->sc_txnext;
2204 txs->txs_ndesc = segs_needed;
2205
2206 /* Set up offload parameters for this packet. */
2207 if (m0->m_pkthdr.csum_flags &
2208 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2209 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2210 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2211 if (wm_tx_offload(sc, txs, &cksumcmd,
2212 &cksumfields) != 0) {
2213 /* Error message already displayed. */
2214 bus_dmamap_unload(sc->sc_dmat, dmamap);
2215 continue;
2216 }
2217 } else {
2218 cksumcmd = 0;
2219 cksumfields = 0;
2220 }
2221
2222 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2223
2224 /* Sync the DMA map. */
2225 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2226 BUS_DMASYNC_PREWRITE);
2227
2228 /*
2229 * Initialize the transmit descriptor.
2230 */
2231 for (nexttx = sc->sc_txnext, seg = 0;
2232 seg < dmamap->dm_nsegs; seg++) {
2233 for (seglen = dmamap->dm_segs[seg].ds_len,
2234 curaddr = dmamap->dm_segs[seg].ds_addr;
2235 seglen != 0;
2236 curaddr += curlen, seglen -= curlen,
2237 nexttx = WM_NEXTTX(sc, nexttx)) {
2238 curlen = seglen;
2239
2240 /*
2241 * So says the Linux driver:
2242 * Work around for premature descriptor
2243 * write-backs in TSO mode. Append a
2244 * 4-byte sentinel descriptor.
2245 */
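				/*
				 * Shortening the final chunk by 4 bytes
				 * leaves a 4-byte remainder in the segment,
				 * so the loop makes one extra pass to emit
				 * the sentinel descriptor.
				 */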
2246 if (use_tso &&
2247 seg == dmamap->dm_nsegs - 1 &&
2248 curlen > 8)
2249 curlen -= 4;
2250
2251 wm_set_dma_addr(
2252 &sc->sc_txdescs[nexttx].wtx_addr,
2253 curaddr);
2254 sc->sc_txdescs[nexttx].wtx_cmdlen =
2255 htole32(cksumcmd | curlen);
2256 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2257 0;
2258 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2259 cksumfields;
2260 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2261 lasttx = nexttx;
2262
2263 DPRINTF(WM_DEBUG_TX,
2264 ("%s: TX: desc %d: low 0x%08lx, "
2265 "len 0x%04x\n",
2266 device_xname(sc->sc_dev), nexttx,
2267 curaddr & 0xffffffffUL, (unsigned)curlen));
2268 }
2269 }
2270
2271 KASSERT(lasttx != -1);
2272
2273 /*
2274 * Set up the command byte on the last descriptor of
2275 * the packet. If we're in the interrupt delay window,
2276 * delay the interrupt.
2277 */
2278 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2279 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2280
2281 /*
2282 * If VLANs are enabled and the packet has a VLAN tag, set
2283 * up the descriptor to encapsulate the packet for us.
2284 *
2285 * This is only valid on the last descriptor of the packet.
2286 */
2287 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2288 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2289 htole32(WTX_CMD_VLE);
2290 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2291 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2292 }
2293
2294 txs->txs_lastdesc = lasttx;
2295
2296 DPRINTF(WM_DEBUG_TX,
2297 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2298 device_xname(sc->sc_dev),
2299 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2300
2301 /* Sync the descriptors we're using. */
2302 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2303 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2304
2305 /* Give the packet to the chip. */
2306 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2307
2308 DPRINTF(WM_DEBUG_TX,
2309 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2310
2311 DPRINTF(WM_DEBUG_TX,
2312 ("%s: TX: finished transmitting packet, job %d\n",
2313 device_xname(sc->sc_dev), sc->sc_txsnext));
2314
2315 /* Advance the tx pointer. */
2316 sc->sc_txfree -= txs->txs_ndesc;
2317 sc->sc_txnext = nexttx;
2318
2319 sc->sc_txsfree--;
2320 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2321
2322 #if NBPFILTER > 0
2323 /* Pass the packet to any BPF listeners. */
2324 if (ifp->if_bpf)
2325 bpf_mtap(ifp->if_bpf, m0);
2326 #endif /* NBPFILTER > 0 */
2327 }
2328
2329 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2330 /* No more slots; notify upper layer. */
2331 ifp->if_flags |= IFF_OACTIVE;
2332 }
2333
2334 if (sc->sc_txfree != ofree) {
2335 /* Set a watchdog timer in case the chip flakes out. */
2336 ifp->if_timer = 5;
2337 }
2338 }
2339
2340 /*
2341 * wm_watchdog: [ifnet interface function]
2342 *
2343 * Watchdog timer handler.
2344 */
2345 static void
2346 wm_watchdog(struct ifnet *ifp)
2347 {
2348 struct wm_softc *sc = ifp->if_softc;
2349
2350 /*
2351 * Since we're using delayed interrupts, sweep up
2352 * before we report an error.
2353 */
2354 wm_txintr(sc);
2355
2356 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2357 log(LOG_ERR,
2358 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2359 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2360 sc->sc_txnext);
2361 ifp->if_oerrors++;
2362
2363 /* Reset the interface. */
2364 (void) wm_init(ifp);
2365 }
2366
2367 /* Try to get more packets going. */
2368 wm_start(ifp);
2369 }
2370
2371 /*
2372 * wm_ioctl: [ifnet interface function]
2373 *
2374 * Handle control requests from the operator.
2375 */
2376 static int
2377 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2378 {
2379 struct wm_softc *sc = ifp->if_softc;
2380 struct ifreq *ifr = (struct ifreq *) data;
2381 struct ifaddr *ifa = (struct ifaddr *)data;
2382 struct sockaddr_dl *sdl;
2383 int diff, s, error;
2384
2385 s = splnet();
2386
2387 switch (cmd) {
2388 case SIOCSIFFLAGS:
2389 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2390 break;
2391 if (ifp->if_flags & IFF_UP) {
2392 diff = (ifp->if_flags ^ sc->sc_if_flags)
2393 & (IFF_PROMISC | IFF_ALLMULTI);
2394 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2395 /*
2396 				 * If the difference between the old and
2397 				 * new flags is only IFF_PROMISC or
2398 * IFF_ALLMULTI, set multicast filter only
2399 * (don't reset to prevent link down).
2400 */
2401 wm_set_filter(sc);
2402 } else {
2403 /*
2404 * Reset the interface to pick up changes in
2405 * any other flags that affect the hardware
2406 * state.
2407 */
2408 wm_init(ifp);
2409 }
2410 } else {
2411 if (ifp->if_flags & IFF_RUNNING)
2412 wm_stop(ifp, 1);
2413 }
2414 sc->sc_if_flags = ifp->if_flags;
2415 error = 0;
2416 break;
2417 case SIOCSIFMEDIA:
2418 case SIOCGIFMEDIA:
2419 /* Flow control requires full-duplex mode. */
2420 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2421 (ifr->ifr_media & IFM_FDX) == 0)
2422 ifr->ifr_media &= ~IFM_ETH_FMASK;
2423 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2424 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2425 /* We can do both TXPAUSE and RXPAUSE. */
2426 ifr->ifr_media |=
2427 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2428 }
2429 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2430 }
2431 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2432 break;
2433 case SIOCINITIFADDR:
2434 if (ifa->ifa_addr->sa_family == AF_LINK) {
2435 sdl = satosdl(ifp->if_dl->ifa_addr);
2436 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2437 LLADDR(satosdl(ifa->ifa_addr)),
2438 ifp->if_addrlen);
2439 /* unicast address is first multicast entry */
2440 wm_set_filter(sc);
2441 error = 0;
2442 break;
2443 }
2444 /* Fall through for rest */
2445 default:
2446 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2447 break;
2448
2449 error = 0;
2450
2451 if (cmd == SIOCSIFCAP)
2452 error = (*ifp->if_init)(ifp);
2453 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2454 ;
2455 else if (ifp->if_flags & IFF_RUNNING) {
2456 /*
2457 * Multicast list has changed; set the hardware filter
2458 * accordingly.
2459 */
2460 wm_set_filter(sc);
2461 }
2462 break;
2463 }
2464
2465 /* Try to get more packets going. */
2466 wm_start(ifp);
2467
2468 splx(s);
2469 return (error);
2470 }
2471
2472 /*
2473 * wm_intr:
2474 *
2475 * Interrupt service routine.
2476 */
2477 static int
2478 wm_intr(void *arg)
2479 {
2480 struct wm_softc *sc = arg;
2481 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2482 uint32_t icr;
2483 int handled = 0;
2484
2485 while (1 /* CONSTCOND */) {
2486 icr = CSR_READ(sc, WMREG_ICR);
2487 if ((icr & sc->sc_icr) == 0)
2488 break;
2489 #if 0 /*NRND > 0*/
2490 if (RND_ENABLED(&sc->rnd_source))
2491 rnd_add_uint32(&sc->rnd_source, icr);
2492 #endif
2493
2494 handled = 1;
2495
2496 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2497 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2498 DPRINTF(WM_DEBUG_RX,
2499 ("%s: RX: got Rx intr 0x%08x\n",
2500 device_xname(sc->sc_dev),
2501 icr & (ICR_RXDMT0|ICR_RXT0)));
2502 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2503 }
2504 #endif
2505 wm_rxintr(sc);
2506
2507 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2508 if (icr & ICR_TXDW) {
2509 DPRINTF(WM_DEBUG_TX,
2510 ("%s: TX: got TXDW interrupt\n",
2511 device_xname(sc->sc_dev)));
2512 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2513 }
2514 #endif
2515 wm_txintr(sc);
2516
2517 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2518 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2519 wm_linkintr(sc, icr);
2520 }
2521
2522 if (icr & ICR_RXO) {
2523 ifp->if_ierrors++;
2524 #if defined(WM_DEBUG)
2525 log(LOG_WARNING, "%s: Receive overrun\n",
2526 device_xname(sc->sc_dev));
2527 #endif /* defined(WM_DEBUG) */
2528 }
2529 }
2530
2531 if (handled) {
2532 /* Try to get more packets going. */
2533 wm_start(ifp);
2534 }
2535
2536 return (handled);
2537 }
2538
2539 /*
2540 * wm_txintr:
2541 *
2542 * Helper; handle transmit interrupts.
2543 */
2544 static void
2545 wm_txintr(struct wm_softc *sc)
2546 {
2547 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2548 struct wm_txsoft *txs;
2549 uint8_t status;
2550 int i;
2551
2552 ifp->if_flags &= ~IFF_OACTIVE;
2553
2554 /*
2555 * Go through the Tx list and free mbufs for those
2556 * frames which have been transmitted.
2557 */
2558 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2559 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2560 txs = &sc->sc_txsoft[i];
2561
2562 DPRINTF(WM_DEBUG_TX,
2563 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2564
2565 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2566 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2567
2568 status =
2569 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2570 if ((status & WTX_ST_DD) == 0) {
2571 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2572 BUS_DMASYNC_PREREAD);
2573 break;
2574 }
2575
2576 DPRINTF(WM_DEBUG_TX,
2577 ("%s: TX: job %d done: descs %d..%d\n",
2578 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2579 txs->txs_lastdesc));
2580
2581 /*
2582 * XXX We should probably be using the statistics
2583 * XXX registers, but I don't know if they exist
2584 * XXX on chips before the i82544.
2585 */
2586
2587 #ifdef WM_EVENT_COUNTERS
2588 if (status & WTX_ST_TU)
2589 WM_EVCNT_INCR(&sc->sc_ev_tu);
2590 #endif /* WM_EVENT_COUNTERS */
2591
2592 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2593 ifp->if_oerrors++;
2594 if (status & WTX_ST_LC)
2595 log(LOG_WARNING, "%s: late collision\n",
2596 device_xname(sc->sc_dev));
2597 else if (status & WTX_ST_EC) {
2598 ifp->if_collisions += 16;
2599 log(LOG_WARNING, "%s: excessive collisions\n",
2600 device_xname(sc->sc_dev));
2601 }
2602 } else
2603 ifp->if_opackets++;
2604
2605 sc->sc_txfree += txs->txs_ndesc;
2606 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2607 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2608 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2609 m_freem(txs->txs_mbuf);
2610 txs->txs_mbuf = NULL;
2611 }
2612
2613 /* Update the dirty transmit buffer pointer. */
2614 sc->sc_txsdirty = i;
2615 DPRINTF(WM_DEBUG_TX,
2616 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2617
2618 /*
2619 * If there are no more pending transmissions, cancel the watchdog
2620 * timer.
2621 */
2622 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2623 ifp->if_timer = 0;
2624 }
2625
2626 /*
2627 * wm_rxintr:
2628 *
2629 * Helper; handle receive interrupts.
2630 */
2631 static void
2632 wm_rxintr(struct wm_softc *sc)
2633 {
2634 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2635 struct wm_rxsoft *rxs;
2636 struct mbuf *m;
2637 int i, len;
2638 uint8_t status, errors;
2639 uint16_t vlantag;
2640
2641 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2642 rxs = &sc->sc_rxsoft[i];
2643
2644 DPRINTF(WM_DEBUG_RX,
2645 ("%s: RX: checking descriptor %d\n",
2646 device_xname(sc->sc_dev), i));
2647
2648 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2649
2650 status = sc->sc_rxdescs[i].wrx_status;
2651 errors = sc->sc_rxdescs[i].wrx_errors;
2652 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2653 vlantag = sc->sc_rxdescs[i].wrx_special;
2654
2655 if ((status & WRX_ST_DD) == 0) {
2656 /*
2657 * We have processed all of the receive descriptors.
2658 */
2659 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2660 break;
2661 }
2662
2663 if (__predict_false(sc->sc_rxdiscard)) {
2664 DPRINTF(WM_DEBUG_RX,
2665 ("%s: RX: discarding contents of descriptor %d\n",
2666 device_xname(sc->sc_dev), i));
2667 WM_INIT_RXDESC(sc, i);
2668 if (status & WRX_ST_EOP) {
2669 /* Reset our state. */
2670 DPRINTF(WM_DEBUG_RX,
2671 ("%s: RX: resetting rxdiscard -> 0\n",
2672 device_xname(sc->sc_dev)));
2673 sc->sc_rxdiscard = 0;
2674 }
2675 continue;
2676 }
2677
2678 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2679 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2680
2681 m = rxs->rxs_mbuf;
2682
2683 /*
2684 * Add a new receive buffer to the ring, unless of
2685 * course the length is zero. Treat the latter as a
2686 * failed mapping.
2687 */
2688 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2689 /*
2690 * Failed, throw away what we've done so
2691 * far, and discard the rest of the packet.
2692 */
2693 ifp->if_ierrors++;
2694 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2695 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2696 WM_INIT_RXDESC(sc, i);
2697 if ((status & WRX_ST_EOP) == 0)
2698 sc->sc_rxdiscard = 1;
2699 if (sc->sc_rxhead != NULL)
2700 m_freem(sc->sc_rxhead);
2701 WM_RXCHAIN_RESET(sc);
2702 DPRINTF(WM_DEBUG_RX,
2703 ("%s: RX: Rx buffer allocation failed, "
2704 "dropping packet%s\n", device_xname(sc->sc_dev),
2705 sc->sc_rxdiscard ? " (discard)" : ""));
2706 continue;
2707 }
2708
2709 m->m_len = len;
2710 sc->sc_rxlen += len;
2711 DPRINTF(WM_DEBUG_RX,
2712 ("%s: RX: buffer at %p len %d\n",
2713 device_xname(sc->sc_dev), m->m_data, len));
2714
2715 /*
2716 * If this is not the end of the packet, keep
2717 * looking.
2718 */
2719 if ((status & WRX_ST_EOP) == 0) {
2720 WM_RXCHAIN_LINK(sc, m);
2721 DPRINTF(WM_DEBUG_RX,
2722 ("%s: RX: not yet EOP, rxlen -> %d\n",
2723 device_xname(sc->sc_dev), sc->sc_rxlen));
2724 continue;
2725 }
2726
2727 /*
2728 * Okay, we have the entire packet now. The chip is
2729 * configured to include the FCS (not all chips can
2730 * be configured to strip it), so we need to trim it.
2731 * May need to adjust length of previous mbuf in the
2732 * chain if the current mbuf is too short.
2733 */
2734 if (m->m_len < ETHER_CRC_LEN) {
2735 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2736 m->m_len = 0;
2737 } else {
2738 m->m_len -= ETHER_CRC_LEN;
2739 }
2740 len = sc->sc_rxlen - ETHER_CRC_LEN;
2741
2742 WM_RXCHAIN_LINK(sc, m);
2743
2744 *sc->sc_rxtailp = NULL;
2745 m = sc->sc_rxhead;
2746
2747 WM_RXCHAIN_RESET(sc);
2748
2749 DPRINTF(WM_DEBUG_RX,
2750 ("%s: RX: have entire packet, len -> %d\n",
2751 device_xname(sc->sc_dev), len));
2752
2753 /*
2754 * If an error occurred, update stats and drop the packet.
2755 */
2756 if (errors &
2757 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2758 ifp->if_ierrors++;
2759 if (errors & WRX_ER_SE)
2760 log(LOG_WARNING, "%s: symbol error\n",
2761 device_xname(sc->sc_dev));
2762 else if (errors & WRX_ER_SEQ)
2763 log(LOG_WARNING, "%s: receive sequence error\n",
2764 device_xname(sc->sc_dev));
2765 else if (errors & WRX_ER_CE)
2766 log(LOG_WARNING, "%s: CRC error\n",
2767 device_xname(sc->sc_dev));
2768 m_freem(m);
2769 continue;
2770 }
2771
2772 /*
2773 * No errors. Receive the packet.
2774 */
2775 m->m_pkthdr.rcvif = ifp;
2776 m->m_pkthdr.len = len;
2777
2778 /*
2779 * If VLANs are enabled, VLAN packets have been unwrapped
2780 * for us. Associate the tag with the packet.
2781 */
2782 if ((status & WRX_ST_VP) != 0) {
2783 VLAN_INPUT_TAG(ifp, m,
2784 le16toh(vlantag),
2785 continue);
2786 }
2787
2788 /*
2789 * Set up checksum info for this packet.
2790 */
2791 if ((status & WRX_ST_IXSM) == 0) {
2792 if (status & WRX_ST_IPCS) {
2793 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2794 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2795 if (errors & WRX_ER_IPE)
2796 m->m_pkthdr.csum_flags |=
2797 M_CSUM_IPv4_BAD;
2798 }
2799 if (status & WRX_ST_TCPCS) {
2800 /*
2801 * Note: we don't know if this was TCP or UDP,
2802 * so we just set both bits, and expect the
2803 * upper layers to deal.
2804 */
2805 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2806 m->m_pkthdr.csum_flags |=
2807 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2808 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2809 if (errors & WRX_ER_TCPE)
2810 m->m_pkthdr.csum_flags |=
2811 M_CSUM_TCP_UDP_BAD;
2812 }
2813 }
2814
2815 ifp->if_ipackets++;
2816
2817 #if NBPFILTER > 0
2818 /* Pass this up to any BPF listeners. */
2819 if (ifp->if_bpf)
2820 bpf_mtap(ifp->if_bpf, m);
2821 #endif /* NBPFILTER > 0 */
2822
2823 /* Pass it on. */
2824 (*ifp->if_input)(ifp, m);
2825 }
2826
2827 /* Update the receive pointer. */
2828 sc->sc_rxptr = i;
2829
2830 DPRINTF(WM_DEBUG_RX,
2831 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2832 }
2833
2834 /*
2835 * wm_linkintr:
2836 *
2837 * Helper; handle link interrupts.
2838 */
2839 static void
2840 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2841 {
2842 uint32_t status;
2843
2844 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2845 __func__));
2846 /*
2847 * If we get a link status interrupt on a 1000BASE-T
2848 * device, just fall into the normal MII tick path.
2849 */
2850 if (sc->sc_flags & WM_F_HAS_MII) {
2851 if (icr & ICR_LSC) {
2852 DPRINTF(WM_DEBUG_LINK,
2853 ("%s: LINK: LSC -> mii_tick\n",
2854 device_xname(sc->sc_dev)));
2855 mii_tick(&sc->sc_mii);
2856 if (sc->sc_type == WM_T_82543) {
2857 int miistatus, active;
2858
2859 /*
2860 * With 82543, we need to force speed and
2861 * duplex on the MAC equal to what the PHY
2862 * speed and duplex configuration is.
2863 */
2864 miistatus = sc->sc_mii.mii_media_status;
2865
2866 if (miistatus & IFM_ACTIVE) {
2867 active = sc->sc_mii.mii_media_active;
2868 sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2869 | CTRL_FD);
2870 switch (IFM_SUBTYPE(active)) {
2871 case IFM_10_T:
2872 sc->sc_ctrl |= CTRL_SPEED_10;
2873 break;
2874 case IFM_100_TX:
2875 sc->sc_ctrl |= CTRL_SPEED_100;
2876 break;
2877 case IFM_1000_T:
2878 sc->sc_ctrl |= CTRL_SPEED_1000;
2879 break;
2880 default:
2881 /*
2882 * fiber?
2883 					 * Should not enter here.
2884 */
2885 printf("unknown media (%x)\n",
2886 active);
2887 break;
2888 }
2889 if (active & IFM_FDX)
2890 sc->sc_ctrl |= CTRL_FD;
2891 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2892 }
2893 }
2894 } else if (icr & ICR_RXSEQ) {
2895 DPRINTF(WM_DEBUG_LINK,
2896 ("%s: LINK Receive sequence error\n",
2897 device_xname(sc->sc_dev)));
2898 }
2899 return;
2900 }
2901
2902 status = CSR_READ(sc, WMREG_STATUS);
2903 if (icr & ICR_LSC) {
2904 if (status & STATUS_LU) {
2905 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2906 device_xname(sc->sc_dev),
2907 (status & STATUS_FD) ? "FDX" : "HDX"));
2908 /*
2909 * NOTE: CTRL will update TFCE and RFCE automatically,
2910 * so we should update sc->sc_ctrl
2911 */
2912
2913 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2914 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2915 sc->sc_fcrtl &= ~FCRTL_XONE;
2916 if (status & STATUS_FD)
2917 sc->sc_tctl |=
2918 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2919 else
2920 sc->sc_tctl |=
2921 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2922 if (sc->sc_ctrl & CTRL_TFCE)
2923 sc->sc_fcrtl |= FCRTL_XONE;
2924 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2925 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2926 WMREG_OLD_FCRTL : WMREG_FCRTL,
2927 sc->sc_fcrtl);
2928 sc->sc_tbi_linkup = 1;
2929 } else {
2930 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2931 device_xname(sc->sc_dev)));
2932 sc->sc_tbi_linkup = 0;
2933 }
2934 wm_tbi_set_linkled(sc);
2935 } else if (icr & ICR_RXCFG) {
2936 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2937 device_xname(sc->sc_dev)));
2938 sc->sc_tbi_nrxcfg++;
2939 wm_check_for_link(sc);
2940 } else if (icr & ICR_RXSEQ) {
2941 DPRINTF(WM_DEBUG_LINK,
2942 ("%s: LINK: Receive sequence error\n",
2943 device_xname(sc->sc_dev)));
2944 }
2945 }
2946
2947 /*
2948 * wm_tick:
2949 *
2950 * One second timer, used to check link status, sweep up
2951 * completed transmit jobs, etc.
2952 */
2953 static void
2954 wm_tick(void *arg)
2955 {
2956 struct wm_softc *sc = arg;
2957 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2958 int s;
2959
2960 s = splnet();
2961
2962 if (sc->sc_type >= WM_T_82542_2_1) {
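		/*
		 * These statistics registers clear on read, so each read
		 * below picks up only the events since the previous tick.
		 */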
2963 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2964 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2965 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2966 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2967 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2968 }
2969
2970 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2971 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2972
2973 if (sc->sc_flags & WM_F_HAS_MII)
2974 mii_tick(&sc->sc_mii);
2975 else
2976 wm_tbi_check_link(sc);
2977
2978 splx(s);
2979
2980 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2981 }
2982
2983 /*
2984 * wm_reset:
2985 *
2986 * Reset the i82542 chip.
2987 */
2988 static void
2989 wm_reset(struct wm_softc *sc)
2990 {
2991 uint32_t reg;
2992
2993 /*
2994 * Allocate on-chip memory according to the MTU size.
2995 * The Packet Buffer Allocation register must be written
2996 * before the chip is reset.
2997 */
2998 switch (sc->sc_type) {
2999 case WM_T_82547:
3000 case WM_T_82547_2:
3001 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3002 PBA_22K : PBA_30K;
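		/*
		 * The 82547 shares a 40KB packet buffer between Rx and Tx;
		 * whatever is not assigned to Rx (sc_pba) serves as the
		 * Tx FIFO.
		 */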
3003 sc->sc_txfifo_head = 0;
3004 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3005 sc->sc_txfifo_size =
3006 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3007 sc->sc_txfifo_stall = 0;
3008 break;
3009 case WM_T_82571:
3010 case WM_T_82572:
3011 case WM_T_80003:
3012 sc->sc_pba = PBA_32K;
3013 break;
3014 case WM_T_82573:
3015 sc->sc_pba = PBA_12K;
3016 break;
3017 case WM_T_82574:
3018 case WM_T_82583:
3019 sc->sc_pba = PBA_20K;
3020 break;
3021 case WM_T_ICH8:
3022 sc->sc_pba = PBA_8K;
3023 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3024 break;
3025 case WM_T_ICH9:
3026 case WM_T_ICH10:
3027 sc->sc_pba = PBA_10K;
3028 break;
3029 default:
3030 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3031 PBA_40K : PBA_48K;
3032 break;
3033 }
3034 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3035
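	/*
	 * On PCIe devices, request that the GIO master interface be
	 * disabled and wait up to 80ms (800 * 100us) for in-flight
	 * transactions to complete before resetting.
	 */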
3036 if (sc->sc_flags & WM_F_PCIE) {
3037 int timeout = 800;
3038
3039 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3040 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3041
3042 while (timeout--) {
3043 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3044 break;
3045 delay(100);
3046 }
3047 }
3048
3049 	/* Disable all interrupts. */
3050 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3051
3052 /*
3053 * 82541 Errata 29? & 82547 Errata 28?
3054 * See also the description about PHY_RST bit in CTRL register
3055 * in 8254x_GBe_SDM.pdf.
3056 */
3057 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3058 CSR_WRITE(sc, WMREG_CTRL,
3059 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3060 delay(5000);
3061 }
3062
3063 switch (sc->sc_type) {
3064 case WM_T_82544:
3065 case WM_T_82540:
3066 case WM_T_82545:
3067 case WM_T_82546:
3068 case WM_T_82541:
3069 case WM_T_82541_2:
3070 /*
3071 * On some chipsets, a reset through a memory-mapped write
3072 * cycle can cause the chip to reset before completing the
3073 		 * write cycle. This causes major headaches that can be
3074 * avoided by issuing the reset via indirect register writes
3075 * through I/O space.
3076 *
3077 * So, if we successfully mapped the I/O BAR at attach time,
3078 * use that. Otherwise, try our luck with a memory-mapped
3079 * reset.
3080 */
3081 if (sc->sc_flags & WM_F_IOH_VALID)
3082 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3083 else
3084 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3085 break;
3086
3087 case WM_T_82545_3:
3088 case WM_T_82546_3:
3089 /* Use the shadow control register on these chips. */
3090 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3091 break;
3092
3093 case WM_T_ICH8:
3094 case WM_T_ICH9:
3095 case WM_T_ICH10:
3096 wm_get_swfwhw_semaphore(sc);
3097 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3098 delay(10000);
3099
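		/*
		 * XXX No break here: execution falls through, so the
		 * plain CTRL_RST write below is issued as well.
		 */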
3100 default:
3101 /* Everything else can safely use the documented method. */
3102 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3103 break;
3104 }
3105 delay(10000);
3106
3107 /* reload EEPROM */
3108 	switch (sc->sc_type) {
3109 case WM_T_82542_2_0:
3110 case WM_T_82542_2_1:
3111 case WM_T_82543:
3112 case WM_T_82544:
3113 delay(10);
3114 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3115 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3116 delay(2000);
3117 break;
3118 case WM_T_82541:
3119 case WM_T_82541_2:
3120 case WM_T_82547:
3121 case WM_T_82547_2:
3122 delay(20000);
3123 break;
3124 case WM_T_82573:
3125 case WM_T_82574:
3126 case WM_T_82583:
3127 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3128 delay(10);
3129 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3130 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3131 }
3132 /* FALLTHROUGH */
3133 default:
3134 /* check EECD_EE_AUTORD */
3135 wm_get_auto_rd_done(sc);
3136 }
3137
3138 /* reload sc_ctrl */
3139 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3140
3141 #if 0
3142 for (i = 0; i < 1000; i++) {
3143 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3144 return;
3145 }
3146 delay(20);
3147 }
3148
3149 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3150 log(LOG_ERR, "%s: reset failed to complete\n",
3151 device_xname(sc->sc_dev));
3152 #endif
3153 }
3154
3155 /*
3156 * wm_init: [ifnet interface function]
3157 *
3158 * Initialize the interface. Must be called at splnet().
3159 */
3160 static int
3161 wm_init(struct ifnet *ifp)
3162 {
3163 struct wm_softc *sc = ifp->if_softc;
3164 struct wm_rxsoft *rxs;
3165 int i, error = 0;
3166 uint32_t reg;
3167
3168 /*
3169 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3170 	 * There is a small but measurable benefit to avoiding the adjustment
3171 * of the descriptor so that the headers are aligned, for normal mtu,
3172 * on such platforms. One possibility is that the DMA itself is
3173 * slightly more efficient if the front of the entire packet (instead
3174 * of the front of the headers) is aligned.
3175 *
3176 * Note we must always set align_tweak to 0 if we are using
3177 * jumbo frames.
3178 */
3179 #ifdef __NO_STRICT_ALIGNMENT
3180 sc->sc_align_tweak = 0;
3181 #else
3182 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3183 sc->sc_align_tweak = 0;
3184 else
3185 sc->sc_align_tweak = 2;
3186 #endif /* __NO_STRICT_ALIGNMENT */
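	/*
	 * The 2-byte tweak shifts the 14-byte Ethernet header so that
	 * the IP header behind it lands on a 4-byte boundary; it must
	 * be 0 whenever a maximum-sized frame might not fit in the
	 * cluster shortened by those 2 bytes.
	 */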
3187
3188 /* Cancel any pending I/O. */
3189 wm_stop(ifp, 0);
3190
3191 /* update statistics before reset */
3192 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3193 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3194
3195 /* Reset the chip to a known state. */
3196 wm_reset(sc);
3197
3198 switch (sc->sc_type) {
3199 case WM_T_82571:
3200 case WM_T_82572:
3201 case WM_T_82573:
3202 case WM_T_82574:
3203 case WM_T_82583:
3204 case WM_T_80003:
3205 case WM_T_ICH8:
3206 case WM_T_ICH9:
3207 case WM_T_ICH10:
3208 if (wm_check_mng_mode(sc) != 0)
3209 wm_get_hw_control(sc);
3210 break;
3211 default:
3212 break;
3213 }
3214
3215 /* Initialize the transmit descriptor ring. */
3216 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3217 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3218 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3219 sc->sc_txfree = WM_NTXDESC(sc);
3220 sc->sc_txnext = 0;
3221
3222 if (sc->sc_type < WM_T_82543) {
3223 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3224 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3225 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3226 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3227 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3228 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3229 } else {
3230 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3231 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3232 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3233 CSR_WRITE(sc, WMREG_TDH, 0);
3234 CSR_WRITE(sc, WMREG_TDT, 0);
3235 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3236 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3237
3238 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3239 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3240 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3241 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3242 }
3243 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3244 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3245
3246 /* Initialize the transmit job descriptors. */
3247 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3248 sc->sc_txsoft[i].txs_mbuf = NULL;
3249 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3250 sc->sc_txsnext = 0;
3251 sc->sc_txsdirty = 0;
3252
3253 /*
3254 * Initialize the receive descriptor and receive job
3255 * descriptor rings.
3256 */
3257 if (sc->sc_type < WM_T_82543) {
3258 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3259 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3260 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3261 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3262 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3263 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3264
3265 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3266 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3267 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3268 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3269 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3270 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3271 } else {
3272 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3273 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3274 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3275 CSR_WRITE(sc, WMREG_RDH, 0);
3276 CSR_WRITE(sc, WMREG_RDT, 0);
3277 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3278 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3279 }
3280 for (i = 0; i < WM_NRXDESC; i++) {
3281 rxs = &sc->sc_rxsoft[i];
3282 if (rxs->rxs_mbuf == NULL) {
3283 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3284 log(LOG_ERR, "%s: unable to allocate or map rx "
3285 "buffer %d, error = %d\n",
3286 device_xname(sc->sc_dev), i, error);
3287 /*
3288 * XXX Should attempt to run with fewer receive
3289 * XXX buffers instead of just failing.
3290 */
3291 wm_rxdrain(sc);
3292 goto out;
3293 }
3294 } else
3295 WM_INIT_RXDESC(sc, i);
3296 }
3297 sc->sc_rxptr = 0;
3298 sc->sc_rxdiscard = 0;
3299 WM_RXCHAIN_RESET(sc);
3300
3301 /*
3302 * Clear out the VLAN table -- we don't use it (yet).
3303 */
3304 CSR_WRITE(sc, WMREG_VET, 0);
3305 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3306 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3307
3308 /*
3309 * Set up flow-control parameters.
3310 *
3311 * XXX Values could probably stand some tuning.
3312 */
3313 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3314 && (sc->sc_type != WM_T_ICH10)) {
3315 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3316 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3317 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3318 }
3319
3320 sc->sc_fcrtl = FCRTL_DFLT;
3321 if (sc->sc_type < WM_T_82543) {
3322 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3323 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3324 } else {
3325 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3326 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3327 }
3328
3329 if (sc->sc_type == WM_T_80003)
3330 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3331 else
3332 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3333
3334 /* Deal with VLAN enables. */
3335 if (VLAN_ATTACHED(&sc->sc_ethercom))
3336 sc->sc_ctrl |= CTRL_VME;
3337 else
3338 sc->sc_ctrl &= ~CTRL_VME;
3339
3340 /* Write the control registers. */
3341 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3342
3343 if (sc->sc_flags & WM_F_HAS_MII) {
3344 int val;
3345
3346 switch (sc->sc_type) {
3347 case WM_T_80003:
3348 case WM_T_ICH8:
3349 case WM_T_ICH9:
3350 case WM_T_ICH10:
3351 /*
3352 			 * Set the MAC to wait the maximum time between each
3353 * iteration and increase the max iterations when
3354 * polling the phy; this fixes erroneous timeouts at
3355 * 10Mbps.
3356 */
3357 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3358 0xFFFF);
3359 val = wm_kmrn_readreg(sc,
3360 KUMCTRLSTA_OFFSET_INB_PARAM);
3361 val |= 0x3F;
3362 wm_kmrn_writereg(sc,
3363 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3364 break;
3365 default:
3366 break;
3367 }
3368
3369 if (sc->sc_type == WM_T_80003) {
3370 val = CSR_READ(sc, WMREG_CTRL_EXT);
3371 val &= ~CTRL_EXT_LINK_MODE_MASK;
3372 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3373
3374 /* Bypass RX and TX FIFO's */
3375 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3376 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3377 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3378
3379 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3380 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3381 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3382 }
3383 }
3384 #if 0
3385 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3386 #endif
3387
3388 /*
3389 * Set up checksum offload parameters.
3390 */
3391 reg = CSR_READ(sc, WMREG_RXCSUM);
3392 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3393 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3394 reg |= RXCSUM_IPOFL;
3395 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3396 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3397 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3398 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3399 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3400
3401 /* Reset TBI's RXCFG count */
3402 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3403
3404 /*
3405 * Set up the interrupt registers.
3406 */
3407 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3408 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3409 ICR_RXO | ICR_RXT0;
3410 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3411 sc->sc_icr |= ICR_RXCFG;
3412 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3413
3414 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3415 || (sc->sc_type == WM_T_ICH10)) {
3416 reg = CSR_READ(sc, WMREG_KABGTXD);
3417 reg |= KABGTXD_BGSQLBIAS;
3418 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3419 }
3420
3421 /* Set up the inter-packet gap. */
3422 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3423
3424 if (sc->sc_type >= WM_T_82543) {
3425 /*
3426 * Set up the interrupt throttling register (units of 256ns)
3427 * Note that a footnote in Intel's documentation says this
3428 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3429 		 * or 10Mbit mode. Empirically, the same appears to hold
3430 		 * for the 1024ns units of the other
3431 * interrupt-related timer registers -- so, really, we ought
3432 * to divide this value by 4 when the link speed is low.
3433 *
3434 * XXX implement this division at link speed change!
3435 */
3436
3437 /*
3438 * For N interrupts/sec, set this value to:
3439 * 1000000000 / (N * 256). Note that we set the
3440 * absolute and packet timer values to this value
3441 * divided by 4 to get "simple timer" behavior.
3442 */
3443
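		/*
		 * 1000000000 / (1500 * 256) =~ 2604; the TIDV/TADV value
		 * of 375 written above is 1500 / 4.
		 */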
3444 sc->sc_itr = 1500; /* 2604 ints/sec */
3445 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3446 }
3447
3448 /* Set the VLAN ethernetype. */
3449 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3450
3451 /*
3452 * Set up the transmit control register; we start out with
3453 	 * a collision distance suitable for FDX, but update it when
3454 * we resolve the media type.
3455 */
3456 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3457 | TCTL_CT(TX_COLLISION_THRESHOLD)
3458 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3459 if (sc->sc_type >= WM_T_82571)
3460 sc->sc_tctl |= TCTL_MULR;
3461 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3462
3463 if (sc->sc_type == WM_T_80003) {
3464 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3465 reg &= ~TCTL_EXT_GCEX_MASK;
3466 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3467 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3468 }
3469
3470 /* Set the media. */
3471 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3472 goto out;
3473
3474 /*
3475 * Set up the receive control register; we actually program
3476 * the register when we set the receive filter. Use multicast
3477 * address offset type 0.
3478 *
3479 * Only the i82544 has the ability to strip the incoming
3480 * CRC, so we don't enable that feature.
3481 */
3482 sc->sc_mchash_type = 0;
3483 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3484 | RCTL_MO(sc->sc_mchash_type);
3485
3486 	/* The 82573, 82574, 82583 and ICH8 don't support jumbo frames */
3487 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3488 sc->sc_type != WM_T_82583 && sc->sc_type != WM_T_ICH8)
3489 sc->sc_rctl |= RCTL_LPE;
3490
3491 if (MCLBYTES == 2048) {
3492 sc->sc_rctl |= RCTL_2k;
3493 } else {
3494 if (sc->sc_type >= WM_T_82543) {
3495 			switch (MCLBYTES) {
3496 case 4096:
3497 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3498 break;
3499 case 8192:
3500 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3501 break;
3502 case 16384:
3503 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3504 break;
3505 default:
3506 panic("wm_init: MCLBYTES %d unsupported",
3507 MCLBYTES);
3508 break;
3509 }
3510 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3511 }
3512
3513 /* Set the receive filter. */
3514 wm_set_filter(sc);
3515
3516 /* Start the one second link check clock. */
3517 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3518
3519 /* ...all done! */
3520 ifp->if_flags |= IFF_RUNNING;
3521 ifp->if_flags &= ~IFF_OACTIVE;
3522
3523 out:
3524 if (error)
3525 log(LOG_ERR, "%s: interface not running\n",
3526 device_xname(sc->sc_dev));
3527 return (error);
3528 }
3529
3530 /*
3531 * wm_rxdrain:
3532 *
3533 * Drain the receive queue.
3534 */
3535 static void
3536 wm_rxdrain(struct wm_softc *sc)
3537 {
3538 struct wm_rxsoft *rxs;
3539 int i;
3540
3541 for (i = 0; i < WM_NRXDESC; i++) {
3542 rxs = &sc->sc_rxsoft[i];
3543 if (rxs->rxs_mbuf != NULL) {
3544 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3545 m_freem(rxs->rxs_mbuf);
3546 rxs->rxs_mbuf = NULL;
3547 }
3548 }
3549 }
3550
3551 /*
3552 * wm_stop: [ifnet interface function]
3553 *
3554 * Stop transmission on the interface.
3555 */
3556 static void
3557 wm_stop(struct ifnet *ifp, int disable)
3558 {
3559 struct wm_softc *sc = ifp->if_softc;
3560 struct wm_txsoft *txs;
3561 int i;
3562
3563 /* Stop the one second clock. */
3564 callout_stop(&sc->sc_tick_ch);
3565
3566 /* Stop the 82547 Tx FIFO stall check timer. */
3567 if (sc->sc_type == WM_T_82547)
3568 callout_stop(&sc->sc_txfifo_ch);
3569
3570 if (sc->sc_flags & WM_F_HAS_MII) {
3571 /* Down the MII. */
3572 mii_down(&sc->sc_mii);
3573 } else {
3574 #if 0
3575 /* Should we clear PHY's status properly? */
3576 wm_reset(sc);
3577 #endif
3578 }
3579
3580 /* Stop the transmit and receive processes. */
3581 CSR_WRITE(sc, WMREG_TCTL, 0);
3582 CSR_WRITE(sc, WMREG_RCTL, 0);
3583
3584 /*
3585 * Clear the interrupt mask to ensure the device cannot assert its
3586 * interrupt line.
3587 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3588 * any currently pending or shared interrupt.
3589 */
3590 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3591 sc->sc_icr = 0;
3592
3593 /* Release any queued transmit buffers. */
3594 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3595 txs = &sc->sc_txsoft[i];
3596 if (txs->txs_mbuf != NULL) {
3597 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3598 m_freem(txs->txs_mbuf);
3599 txs->txs_mbuf = NULL;
3600 }
3601 }
3602
3603 /* Mark the interface as down and cancel the watchdog timer. */
3604 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3605 ifp->if_timer = 0;
3606
3607 if (disable)
3608 wm_rxdrain(sc);
3609 }
3610
3611 void
3612 wm_get_auto_rd_done(struct wm_softc *sc)
3613 {
3614 int i;
3615
3616 /* wait for eeprom to reload */
3617 switch (sc->sc_type) {
3618 case WM_T_82571:
3619 case WM_T_82572:
3620 case WM_T_82573:
3621 case WM_T_82574:
3622 case WM_T_82583:
3623 case WM_T_80003:
3624 case WM_T_ICH8:
3625 case WM_T_ICH9:
3626 case WM_T_ICH10:
3627 for (i = 10; i > 0; i--) {
3628 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3629 break;
3630 delay(1000);
3631 }
3632 if (i == 0) {
3633 log(LOG_ERR, "%s: auto read from eeprom failed to "
3634 "complete\n", device_xname(sc->sc_dev));
3635 }
3636 break;
3637 default:
3638 delay(5000);
3639 break;
3640 }
3641
3642 /* Phy configuration starts after EECD_AUTO_RD is set */
3643 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
3644 	    || sc->sc_type == WM_T_82583)
3645 delay(25000);
3646 }
3647
3648 /*
3649 * wm_acquire_eeprom:
3650 *
3651 * Perform the EEPROM handshake required on some chips.
3652 */
3653 static int
3654 wm_acquire_eeprom(struct wm_softc *sc)
3655 {
3656 uint32_t reg;
3657 int x;
3658 int ret = 0;
3659
3660 	/* Flash-based EEPROM needs no handshake; always succeeds. */
3661 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3662 return 0;
3663
3664 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3665 ret = wm_get_swfwhw_semaphore(sc);
3666 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3667 /* this will also do wm_get_swsm_semaphore() if needed */
3668 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3669 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3670 ret = wm_get_swsm_semaphore(sc);
3671 }
3672
3673 if (ret) {
3674 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3675 __func__);
3676 return 1;
3677 }
3678
3679 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3680 reg = CSR_READ(sc, WMREG_EECD);
3681
3682 /* Request EEPROM access. */
3683 reg |= EECD_EE_REQ;
3684 CSR_WRITE(sc, WMREG_EECD, reg);
3685
3686 /* ..and wait for it to be granted. */
3687 for (x = 0; x < 1000; x++) {
3688 reg = CSR_READ(sc, WMREG_EECD);
3689 if (reg & EECD_EE_GNT)
3690 break;
3691 delay(5);
3692 }
3693 if ((reg & EECD_EE_GNT) == 0) {
3694 aprint_error_dev(sc->sc_dev,
3695 "could not acquire EEPROM GNT\n");
3696 reg &= ~EECD_EE_REQ;
3697 CSR_WRITE(sc, WMREG_EECD, reg);
3698 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3699 wm_put_swfwhw_semaphore(sc);
3700 if (sc->sc_flags & WM_F_SWFW_SYNC)
3701 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3702 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3703 wm_put_swsm_semaphore(sc);
3704 return (1);
3705 }
3706 }
3707
3708 return (0);
3709 }
3710
3711 /*
3712 * wm_release_eeprom:
3713 *
3714 * Release the EEPROM mutex.
3715 */
3716 static void
3717 wm_release_eeprom(struct wm_softc *sc)
3718 {
3719 uint32_t reg;
3720
3721 	/* always succeeds */
3722 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3723 return;
3724
3725 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3726 reg = CSR_READ(sc, WMREG_EECD);
3727 reg &= ~EECD_EE_REQ;
3728 CSR_WRITE(sc, WMREG_EECD, reg);
3729 }
3730
3731 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3732 wm_put_swfwhw_semaphore(sc);
3733 if (sc->sc_flags & WM_F_SWFW_SYNC)
3734 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3735 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3736 wm_put_swsm_semaphore(sc);
3737 }
3738
3739 /*
3740 * wm_eeprom_sendbits:
3741 *
3742 * Send a series of bits to the EEPROM.
3743 */
3744 static void
3745 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3746 {
3747 uint32_t reg;
3748 int x;
3749
3750 reg = CSR_READ(sc, WMREG_EECD);
3751
3752 for (x = nbits; x > 0; x--) {
3753 if (bits & (1U << (x - 1)))
3754 reg |= EECD_DI;
3755 else
3756 reg &= ~EECD_DI;
3757 CSR_WRITE(sc, WMREG_EECD, reg);
3758 delay(2);
3759 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3760 delay(2);
3761 CSR_WRITE(sc, WMREG_EECD, reg);
3762 delay(2);
3763 }
3764 }
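
/*
 * Illustrative example: assuming UWIRE_OPC_READ is the usual MicroWire
 * 110b read opcode, wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) drives
 * DI = 1, 1, 0 on three successive SK pulses, most significant bit first.
 */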
3765
3766 /*
3767 * wm_eeprom_recvbits:
3768 *
3769 * Receive a series of bits from the EEPROM.
3770 */
3771 static void
3772 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3773 {
3774 uint32_t reg, val;
3775 int x;
3776
3777 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3778
3779 val = 0;
3780 for (x = nbits; x > 0; x--) {
3781 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3782 delay(2);
3783 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3784 val |= (1U << (x - 1));
3785 CSR_WRITE(sc, WMREG_EECD, reg);
3786 delay(2);
3787 }
3788 *valp = val;
3789 }
3790
3791 /*
3792 * wm_read_eeprom_uwire:
3793 *
3794 * Read a word from the EEPROM using the MicroWire protocol.
3795 */
3796 static int
3797 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3798 {
3799 uint32_t reg, val;
3800 int i;
3801
3802 for (i = 0; i < wordcnt; i++) {
3803 /* Clear SK and DI. */
3804 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3805 CSR_WRITE(sc, WMREG_EECD, reg);
3806
3807 /* Set CHIP SELECT. */
3808 reg |= EECD_CS;
3809 CSR_WRITE(sc, WMREG_EECD, reg);
3810 delay(2);
3811
3812 /* Shift in the READ command. */
3813 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3814
3815 /* Shift in address. */
3816 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3817
3818 /* Shift out the data. */
3819 wm_eeprom_recvbits(sc, &val, 16);
3820 data[i] = val & 0xffff;
3821
3822 /* Clear CHIP SELECT. */
3823 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3824 CSR_WRITE(sc, WMREG_EECD, reg);
3825 delay(2);
3826 }
3827
3828 return (0);
3829 }
3830
3831 /*
3832 * wm_spi_eeprom_ready:
3833 *
3834 * Wait for a SPI EEPROM to be ready for commands.
3835 */
3836 static int
3837 wm_spi_eeprom_ready(struct wm_softc *sc)
3838 {
3839 uint32_t val;
3840 int usec;
3841
3842 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3843 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3844 wm_eeprom_recvbits(sc, &val, 8);
3845 if ((val & SPI_SR_RDY) == 0)
3846 break;
3847 }
3848 if (usec >= SPI_MAX_RETRIES) {
3849 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3850 return (1);
3851 }
3852 return (0);
3853 }
3854
3855 /*
3856 * wm_read_eeprom_spi:
3857 *
3858  *	Read a word from the EEPROM using the SPI protocol.
3859 */
3860 static int
3861 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3862 {
3863 uint32_t reg, val;
3864 int i;
3865 uint8_t opc;
3866
3867 /* Clear SK and CS. */
3868 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3869 CSR_WRITE(sc, WMREG_EECD, reg);
3870 delay(2);
3871
3872 if (wm_spi_eeprom_ready(sc))
3873 return (1);
3874
3875 /* Toggle CS to flush commands. */
3876 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3877 delay(2);
3878 CSR_WRITE(sc, WMREG_EECD, reg);
3879 delay(2);
3880
3881 opc = SPI_OPC_READ;
3882 if (sc->sc_ee_addrbits == 8 && word >= 128)
3883 opc |= SPI_OPC_A8;
3884
3885 wm_eeprom_sendbits(sc, opc, 8);
3886 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3887
3888 for (i = 0; i < wordcnt; i++) {
3889 wm_eeprom_recvbits(sc, &val, 16);
3890 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3891 }
3892
3893 /* Raise CS and clear SK. */
3894 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3895 CSR_WRITE(sc, WMREG_EECD, reg);
3896 delay(2);
3897
3898 return (0);
3899 }
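
/*
 * Note that the receive loop above byte-swaps each word: the first byte
 * clocked in lands in the upper half of the raw value, so a raw 0xAB12
 * becomes 0x12AB in data[].
 */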
3900
3901 #define EEPROM_CHECKSUM 0xBABA
3902 #define EEPROM_SIZE 0x0040
3903
3904 /*
3905 * wm_validate_eeprom_checksum
3906 *
3907  *	The sum of the first 64 (16 bit) words must be EEPROM_CHECKSUM (0xBABA).
3908 */
3909 static int
3910 wm_validate_eeprom_checksum(struct wm_softc *sc)
3911 {
3912 uint16_t checksum;
3913 uint16_t eeprom_data;
3914 int i;
3915
3916 checksum = 0;
3917
3918 for (i = 0; i < EEPROM_SIZE; i++) {
3919 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3920 return 1;
3921 checksum += eeprom_data;
3922 }
3923
3924 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3925 return 1;
3926
3927 return 0;
3928 }
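
#if 0
/*
 * Illustrative sketch (not driver code): a tool writing an EEPROM image
 * would pick the final word so that all EEPROM_SIZE words sum to
 * EEPROM_CHECKSUM, relying on 16-bit wraparound:
 */
static uint16_t
wm_eeprom_calc_checksum(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];
	return EEPROM_CHECKSUM - sum;
}
#endif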
3929
3930 /*
3931 * wm_read_eeprom:
3932 *
3933 * Read data from the serial EEPROM.
3934 */
3935 static int
3936 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3937 {
3938 int rv;
3939
3940 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3941 return 1;
3942
3943 if (wm_acquire_eeprom(sc))
3944 return 1;
3945
3946 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3947 || (sc->sc_type == WM_T_ICH10))
3948 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3949 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3950 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3951 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3952 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3953 else
3954 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3955
3956 wm_release_eeprom(sc);
3957 return rv;
3958 }
3959
3960 static int
3961 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3962 uint16_t *data)
3963 {
3964 int i, eerd = 0;
3965 int error = 0;
3966
3967 for (i = 0; i < wordcnt; i++) {
3968 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3969
3970 CSR_WRITE(sc, WMREG_EERD, eerd);
3971 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3972 if (error != 0)
3973 break;
3974
3975 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3976 }
3977
3978 return error;
3979 }
3980
3981 static int
3982 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3983 {
3984 uint32_t attempts = 100000;
3985 uint32_t i, reg = 0;
3986 int32_t done = -1;
3987
3988 for (i = 0; i < attempts; i++) {
3989 reg = CSR_READ(sc, rw);
3990
3991 if (reg & EERD_DONE) {
3992 done = 0;
3993 break;
3994 }
3995 delay(5);
3996 }
3997
3998 return done;
3999 }
4000
4001 /*
4002 * wm_add_rxbuf:
4003 *
4004  *	Add a receive buffer to the indicated descriptor.
4005 */
4006 static int
4007 wm_add_rxbuf(struct wm_softc *sc, int idx)
4008 {
4009 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
4010 struct mbuf *m;
4011 int error;
4012
4013 MGETHDR(m, M_DONTWAIT, MT_DATA);
4014 if (m == NULL)
4015 return (ENOBUFS);
4016
4017 MCLGET(m, M_DONTWAIT);
4018 if ((m->m_flags & M_EXT) == 0) {
4019 m_freem(m);
4020 return (ENOBUFS);
4021 }
4022
4023 if (rxs->rxs_mbuf != NULL)
4024 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4025
4026 rxs->rxs_mbuf = m;
4027
4028 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4029 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4030 BUS_DMA_READ|BUS_DMA_NOWAIT);
4031 if (error) {
4032 /* XXX XXX XXX */
4033 aprint_error_dev(sc->sc_dev,
4034 "unable to load rx DMA map %d, error = %d\n",
4035 idx, error);
4036 panic("wm_add_rxbuf");
4037 }
4038
4039 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4040 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4041
4042 WM_INIT_RXDESC(sc, idx);
4043
4044 return (0);
4045 }
4046
4047 /*
4048 * wm_set_ral:
4049 *
4050  *	Set an entry in the receive address list.
4051 */
4052 static void
4053 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4054 {
4055 uint32_t ral_lo, ral_hi;
4056
4057 if (enaddr != NULL) {
4058 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4059 (enaddr[3] << 24);
4060 ral_hi = enaddr[4] | (enaddr[5] << 8);
4061 ral_hi |= RAL_AV;
4062 } else {
4063 ral_lo = 0;
4064 ral_hi = 0;
4065 }
4066
4067 if (sc->sc_type >= WM_T_82544) {
4068 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4069 ral_lo);
4070 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4071 ral_hi);
4072 } else {
4073 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4074 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4075 }
4076 }
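
/*
 * For example, the station address 00:11:22:33:44:55 is packed as
 * ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV.
 */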
4077
4078 /*
4079 * wm_mchash:
4080 *
4081 * Compute the hash of the multicast address for the 4096-bit
4082 * multicast filter.
4083 */
4084 static uint32_t
4085 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4086 {
4087 static const int lo_shift[4] = { 4, 3, 2, 0 };
4088 static const int hi_shift[4] = { 4, 5, 6, 8 };
4089 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4090 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4091 uint32_t hash;
4092
4093 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4094 || (sc->sc_type == WM_T_ICH10)) {
4095 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4096 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4097 return (hash & 0x3ff);
4098 }
4099 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4100 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4101
4102 return (hash & 0xfff);
4103 }
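
/*
 * Worked example for a non-ICH part with sc_mchash_type 0 (both shifts
 * are 4): for 01:00:5e:00:00:01, enaddr[4] = 0x00 and enaddr[5] = 0x01,
 * so hash = (0x00 >> 4) | (0x01 << 4) = 0x010. wm_set_filter() below
 * then sets bit 16 (0x010 & 0x1f) of MTA register 0 (0x010 >> 5).
 */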
4104
4105 /*
4106 * wm_set_filter:
4107 *
4108 * Set up the receive filter.
4109 */
4110 static void
4111 wm_set_filter(struct wm_softc *sc)
4112 {
4113 struct ethercom *ec = &sc->sc_ethercom;
4114 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4115 struct ether_multi *enm;
4116 struct ether_multistep step;
4117 bus_addr_t mta_reg;
4118 uint32_t hash, reg, bit;
4119 int i, size;
4120
4121 if (sc->sc_type >= WM_T_82544)
4122 mta_reg = WMREG_CORDOVA_MTA;
4123 else
4124 mta_reg = WMREG_MTA;
4125
4126 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4127
4128 if (ifp->if_flags & IFF_BROADCAST)
4129 sc->sc_rctl |= RCTL_BAM;
4130 if (ifp->if_flags & IFF_PROMISC) {
4131 sc->sc_rctl |= RCTL_UPE;
4132 goto allmulti;
4133 }
4134
4135 /*
4136 * Set the station address in the first RAL slot, and
4137 * clear the remaining slots.
4138 */
4139 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4140 || (sc->sc_type == WM_T_ICH10))
4141 size = WM_ICH8_RAL_TABSIZE;
4142 else
4143 size = WM_RAL_TABSIZE;
4144 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4145 for (i = 1; i < size; i++)
4146 wm_set_ral(sc, NULL, i);
4147
4148 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4149 || (sc->sc_type == WM_T_ICH10))
4150 size = WM_ICH8_MC_TABSIZE;
4151 else
4152 size = WM_MC_TABSIZE;
4153 /* Clear out the multicast table. */
4154 for (i = 0; i < size; i++)
4155 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4156
4157 ETHER_FIRST_MULTI(step, ec, enm);
4158 while (enm != NULL) {
4159 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4160 /*
4161 * We must listen to a range of multicast addresses.
4162 * For now, just accept all multicasts, rather than
4163 * trying to set only those filter bits needed to match
4164 * the range. (At this time, the only use of address
4165 * ranges is for IP multicast routing, for which the
4166 * range is big enough to require all bits set.)
4167 */
4168 goto allmulti;
4169 }
4170
4171 hash = wm_mchash(sc, enm->enm_addrlo);
4172
4173 reg = (hash >> 5);
4174 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4175 || (sc->sc_type == WM_T_ICH10))
4176 reg &= 0x1f;
4177 else
4178 reg &= 0x7f;
4179 bit = hash & 0x1f;
4180
4181 hash = CSR_READ(sc, mta_reg + (reg << 2));
4182 hash |= 1U << bit;
4183
4184 /* XXX Hardware bug?? */
4185 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4186 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4187 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4188 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4189 } else
4190 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4191
4192 ETHER_NEXT_MULTI(step, enm);
4193 }
4194
4195 ifp->if_flags &= ~IFF_ALLMULTI;
4196 goto setit;
4197
4198 allmulti:
4199 ifp->if_flags |= IFF_ALLMULTI;
4200 sc->sc_rctl |= RCTL_MPE;
4201
4202 setit:
4203 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4204 }
4205
4206 /*
4207 * wm_tbi_mediainit:
4208 *
4209 * Initialize media for use on 1000BASE-X devices.
4210 */
4211 static void
4212 wm_tbi_mediainit(struct wm_softc *sc)
4213 {
4214 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4215 const char *sep = "";
4216
4217 if (sc->sc_type < WM_T_82543)
4218 sc->sc_tipg = TIPG_WM_DFLT;
4219 else
4220 sc->sc_tipg = TIPG_LG_DFLT;
4221
4222 sc->sc_tbi_anegticks = 5;
4223
4224 /* Initialize our media structures */
4225 sc->sc_mii.mii_ifp = ifp;
4226
4227 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4228 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4229 wm_tbi_mediastatus);
4230
4231 /*
4232 * SWD Pins:
4233 *
4234 * 0 = Link LED (output)
4235 * 1 = Loss Of Signal (input)
4236 */
4237 sc->sc_ctrl |= CTRL_SWDPIO(0);
4238 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4239
4240 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4241
4242 #define ADD(ss, mm, dd) \
4243 do { \
4244 aprint_normal("%s%s", sep, ss); \
4245 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4246 sep = ", "; \
4247 } while (/*CONSTCOND*/0)
4248
4249 aprint_normal_dev(sc->sc_dev, "");
4250 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4251 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4252 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4253 aprint_normal("\n");
4254
4255 #undef ADD
4256
4257 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4258 }
4259
4260 /*
4261 * wm_tbi_mediastatus: [ifmedia interface function]
4262 *
4263 * Get the current interface media status on a 1000BASE-X device.
4264 */
4265 static void
4266 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4267 {
4268 struct wm_softc *sc = ifp->if_softc;
4269 uint32_t ctrl, status;
4270
4271 ifmr->ifm_status = IFM_AVALID;
4272 ifmr->ifm_active = IFM_ETHER;
4273
4274 status = CSR_READ(sc, WMREG_STATUS);
4275 if ((status & STATUS_LU) == 0) {
4276 ifmr->ifm_active |= IFM_NONE;
4277 return;
4278 }
4279
4280 ifmr->ifm_status |= IFM_ACTIVE;
4281 ifmr->ifm_active |= IFM_1000_SX;
4282 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4283 ifmr->ifm_active |= IFM_FDX;
4284 ctrl = CSR_READ(sc, WMREG_CTRL);
4285 if (ctrl & CTRL_RFCE)
4286 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4287 if (ctrl & CTRL_TFCE)
4288 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4289 }
4290
4291 /*
4292 * wm_tbi_mediachange: [ifmedia interface function]
4293 *
4294 * Set hardware to newly-selected media on a 1000BASE-X device.
4295 */
4296 static int
4297 wm_tbi_mediachange(struct ifnet *ifp)
4298 {
4299 struct wm_softc *sc = ifp->if_softc;
4300 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4301 uint32_t status;
4302 int i;
4303
4304 sc->sc_txcw = 0;
4305 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4306 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4307 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4308 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4309 sc->sc_txcw |= TXCW_ANE;
4310 } else {
4311 /*
4312 * If autonegotiation is turned off, force link up and turn on
4313 * full duplex
4314 */
4315 sc->sc_txcw &= ~TXCW_ANE;
4316 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4317 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4318 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4319 delay(1000);
4320 }
4321
4322 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4323 device_xname(sc->sc_dev),sc->sc_txcw));
4324 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4325 delay(10000);
4326
4327 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4328 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4329
4330 /*
4331 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set if the optics
4332 	 * detect a signal; on the 82544 and earlier the sense is inverted.
4333 */
4334 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4335 /* Have signal; wait for the link to come up. */
4336
4337 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4338 /*
4339 * Reset the link, and let autonegotiation do its thing
4340 */
4341 sc->sc_ctrl |= CTRL_LRST;
4342 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4343 delay(1000);
4344 sc->sc_ctrl &= ~CTRL_LRST;
4345 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4346 delay(1000);
4347 }
4348
4349 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4350 delay(10000);
4351 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4352 break;
4353 }
4354
4355 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4356 device_xname(sc->sc_dev),i));
4357
4358 status = CSR_READ(sc, WMREG_STATUS);
4359 DPRINTF(WM_DEBUG_LINK,
4360 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4361 device_xname(sc->sc_dev),status, STATUS_LU));
4362 if (status & STATUS_LU) {
4363 /* Link is up. */
4364 DPRINTF(WM_DEBUG_LINK,
4365 ("%s: LINK: set media -> link up %s\n",
4366 device_xname(sc->sc_dev),
4367 (status & STATUS_FD) ? "FDX" : "HDX"));
4368
4369 /*
4370 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
4371 			 * automatically, so re-read CTRL to keep sc->sc_ctrl in sync.
4372 */
4373 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4374 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4375 sc->sc_fcrtl &= ~FCRTL_XONE;
4376 if (status & STATUS_FD)
4377 sc->sc_tctl |=
4378 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4379 else
4380 sc->sc_tctl |=
4381 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4382 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4383 sc->sc_fcrtl |= FCRTL_XONE;
4384 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4385 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4386 WMREG_OLD_FCRTL : WMREG_FCRTL,
4387 sc->sc_fcrtl);
4388 sc->sc_tbi_linkup = 1;
4389 } else {
4390 if (i == WM_LINKUP_TIMEOUT)
4391 wm_check_for_link(sc);
4392 /* Link is down. */
4393 DPRINTF(WM_DEBUG_LINK,
4394 ("%s: LINK: set media -> link down\n",
4395 device_xname(sc->sc_dev)));
4396 sc->sc_tbi_linkup = 0;
4397 }
4398 } else {
4399 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4400 device_xname(sc->sc_dev)));
4401 sc->sc_tbi_linkup = 0;
4402 }
4403
4404 wm_tbi_set_linkled(sc);
4405
4406 return (0);
4407 }
4408
4409 /*
4410 * wm_tbi_set_linkled:
4411 *
4412 * Update the link LED on 1000BASE-X devices.
4413 */
4414 static void
4415 wm_tbi_set_linkled(struct wm_softc *sc)
4416 {
4417
4418 if (sc->sc_tbi_linkup)
4419 sc->sc_ctrl |= CTRL_SWDPIN(0);
4420 else
4421 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4422
4423 /* 82540 or newer devices are active low */
4424 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4425
4426 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4427 }
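
/*
 * For example, on an 82540 or newer part with link up, the code first
 * sets CTRL_SWDPIN(0) and then inverts it with the XOR above, driving
 * the active-low LED pin low, i.e. LED on.
 */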
4428
4429 /*
4430 * wm_tbi_check_link:
4431 *
4432 * Check the link on 1000BASE-X devices.
4433 */
4434 static void
4435 wm_tbi_check_link(struct wm_softc *sc)
4436 {
4437 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4438 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4439 uint32_t rxcw, ctrl, status;
4440
4441 status = CSR_READ(sc, WMREG_STATUS);
4442
4443 rxcw = CSR_READ(sc, WMREG_RXCW);
4444 ctrl = CSR_READ(sc, WMREG_CTRL);
4445
4446 /* set link status */
4447 if ((status & STATUS_LU) == 0) {
4448 DPRINTF(WM_DEBUG_LINK,
4449 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4450 sc->sc_tbi_linkup = 0;
4451 } else if (sc->sc_tbi_linkup == 0) {
4452 DPRINTF(WM_DEBUG_LINK,
4453 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4454 (status & STATUS_FD) ? "FDX" : "HDX"));
4455 sc->sc_tbi_linkup = 1;
4456 }
4457
4458 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4459 && ((status & STATUS_LU) == 0)) {
4460 sc->sc_tbi_linkup = 0;
4461 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4462 /* RXCFG storm! */
4463 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4464 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4465 wm_init(ifp);
4466 wm_start(ifp);
4467 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4468 /* If the timer expired, retry autonegotiation */
4469 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4470 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4471 sc->sc_tbi_ticks = 0;
4472 /*
4473 * Reset the link, and let autonegotiation do
4474 * its thing
4475 */
4476 sc->sc_ctrl |= CTRL_LRST;
4477 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4478 delay(1000);
4479 sc->sc_ctrl &= ~CTRL_LRST;
4480 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4481 delay(1000);
4482 CSR_WRITE(sc, WMREG_TXCW,
4483 sc->sc_txcw & ~TXCW_ANE);
4484 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4485 }
4486 }
4487 }
4488
4489 wm_tbi_set_linkled(sc);
4490 }
4491
4492 /*
4493 * wm_gmii_reset:
4494 *
4495 * Reset the PHY.
4496 */
4497 static void
4498 wm_gmii_reset(struct wm_softc *sc)
4499 {
4500 uint32_t reg;
4501 int func = 0; /* XXX gcc */
4502
4503 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4504 || (sc->sc_type == WM_T_ICH10)) {
4505 if (wm_get_swfwhw_semaphore(sc)) {
4506 aprint_error_dev(sc->sc_dev,
4507 "%s: failed to get semaphore\n", __func__);
4508 return;
4509 }
4510 }
4511 if (sc->sc_type == WM_T_80003) {
4512 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4513 if (wm_get_swfw_semaphore(sc,
4514 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4515 aprint_error_dev(sc->sc_dev,
4516 "%s: failed to get semaphore\n", __func__);
4517 return;
4518 }
4519 }
4520
4521 switch (sc->sc_type) {
4522 case WM_T_82542_2_0:
4523 case WM_T_82542_2_1:
4524 /* null ? */
4525 break;
4526 case WM_T_82543:
4527 /*
4528 * With 82543, we need to force speed and duplex on the MAC
4529 * equal to what the PHY speed and duplex configuration is.
4530 * In addition, we need to perform a hardware reset on the PHY
4531 * to take it out of reset.
4532 */
4533 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4534 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4535
4536 /* The PHY reset pin is active-low. */
4537 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4538 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4539 CTRL_EXT_SWDPIN(4));
4540 reg |= CTRL_EXT_SWDPIO(4);
4541
4542 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4543 delay(10);
4544
4545 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4546 delay(10*1000);
4547
4548 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4549 delay(150);
4550 #if 0
4551 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4552 #endif
4553 delay(20*1000); /* extra delay to get PHY ID? */
4554 break;
4555 case WM_T_82544: /* reset 10000us */
4556 case WM_T_82540:
4557 case WM_T_82545:
4558 case WM_T_82545_3:
4559 case WM_T_82546:
4560 case WM_T_82546_3:
4561 case WM_T_82541:
4562 case WM_T_82541_2:
4563 case WM_T_82547:
4564 case WM_T_82547_2:
4565 case WM_T_82571: /* reset 100us */
4566 case WM_T_82572:
4567 case WM_T_82573:
4568 case WM_T_82574:
4569 case WM_T_82583:
4570 case WM_T_80003:
4571 /* generic reset */
4572 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4573 delay((sc->sc_type >= WM_T_82571) ? 100 : 10*1000);
4574 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4575 delay(150*1000);
4576
4577 if ((sc->sc_type == WM_T_82541)
4578 || (sc->sc_type == WM_T_82541_2)
4579 || (sc->sc_type == WM_T_82547)
4580 || (sc->sc_type == WM_T_82547_2)) {
4581 			/* workarounds for igp are done in igp_reset() */
4582 /* XXX add code to set LED after phy reset */
4583 }
4584 break;
4585 case WM_T_ICH8:
4586 case WM_T_ICH9:
4587 case WM_T_ICH10:
4588 /* generic reset */
4589 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4590 delay(100);
4591 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4592 delay(150*1000);
4593
4594 		/* Allow time for h/w to get to a quiescent state after reset */
4595 delay(10*1000);
4596
4597 /* XXX add code to set LED after phy reset */
4598 break;
4599 default:
4600 panic("unknown sc_type\n");
4601 break;
4602 }
4603
4604 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4605 || (sc->sc_type == WM_T_ICH10))
4606 wm_put_swfwhw_semaphore(sc);
4607 if (sc->sc_type == WM_T_80003)
4608 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4609 }
4610
4611 /*
4612 * wm_gmii_mediainit:
4613 *
4614 * Initialize media for use on 1000BASE-T devices.
4615 */
4616 static void
4617 wm_gmii_mediainit(struct wm_softc *sc)
4618 {
4619 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4620
4621 /* We have MII. */
4622 sc->sc_flags |= WM_F_HAS_MII;
4623
4624 if (sc->sc_type == WM_T_80003)
4625 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4626 else
4627 sc->sc_tipg = TIPG_1000T_DFLT;
4628
4629 /*
4630 * Let the chip set speed/duplex on its own based on
4631 * signals from the PHY.
4632 * XXXbouyer - I'm not sure this is right for the 80003,
4633 * the em driver only sets CTRL_SLU here - but it seems to work.
4634 */
4635 sc->sc_ctrl |= CTRL_SLU;
4636 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4637
4638 /* Initialize our media structures and probe the GMII. */
4639 sc->sc_mii.mii_ifp = ifp;
4640
4641 if (sc->sc_type >= WM_T_80003) {
4642 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4643 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4644 } else if (sc->sc_type >= WM_T_82544) {
4645 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4646 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4647 } else {
4648 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4649 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4650 }
4651 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4652
4653 wm_gmii_reset(sc);
4654
4655 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4656 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4657 wm_gmii_mediastatus);
4658
4659 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4660 MII_OFFSET_ANY, MIIF_DOPAUSE);
4661
4662 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4663 /* if failed, retry with *_bm_* */
4664 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4665 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4666
4667 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4668 MII_OFFSET_ANY, MIIF_DOPAUSE);
4669 }
4670 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4671 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4672 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4673 } else {
4674 if (sc->sc_type >= WM_T_82574) {
4675 struct mii_softc *child;
4676
4677 child = LIST_FIRST(&sc->sc_mii.mii_phys);
4678 			/* fix read/write functions, as the e1000 driver does */
4679 if (device_is_a(child->mii_dev, "igphy")) {
4680 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4681 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4682 } else {
4683 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4684 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4685 }
4686 }
4687
4688 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4689 }
4690 }
4691
4692 /*
4693 * wm_gmii_mediastatus: [ifmedia interface function]
4694 *
4695 * Get the current interface media status on a 1000BASE-T device.
4696 */
4697 static void
4698 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4699 {
4700 struct wm_softc *sc = ifp->if_softc;
4701
4702 ether_mediastatus(ifp, ifmr);
4703 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4704 sc->sc_flowflags;
4705 }
4706
4707 /*
4708 * wm_gmii_mediachange: [ifmedia interface function]
4709 *
4710 * Set hardware to newly-selected media on a 1000BASE-T device.
4711 */
4712 static int
4713 wm_gmii_mediachange(struct ifnet *ifp)
4714 {
4715 struct wm_softc *sc = ifp->if_softc;
4716 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4717 int rc;
4718
4719 if ((ifp->if_flags & IFF_UP) == 0)
4720 return 0;
4721
4722 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4723 sc->sc_ctrl |= CTRL_SLU;
4724 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4725 || (sc->sc_type > WM_T_82543)) {
4726 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4727 } else {
4728 sc->sc_ctrl &= ~CTRL_ASDE;
4729 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4730 if (ife->ifm_media & IFM_FDX)
4731 sc->sc_ctrl |= CTRL_FD;
4732 switch(IFM_SUBTYPE(ife->ifm_media)) {
4733 case IFM_10_T:
4734 sc->sc_ctrl |= CTRL_SPEED_10;
4735 break;
4736 case IFM_100_TX:
4737 sc->sc_ctrl |= CTRL_SPEED_100;
4738 break;
4739 case IFM_1000_T:
4740 sc->sc_ctrl |= CTRL_SPEED_1000;
4741 break;
4742 default:
4743 panic("wm_gmii_mediachange: bad media 0x%x",
4744 ife->ifm_media);
4745 }
4746 }
4747 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4748 if (sc->sc_type <= WM_T_82543)
4749 wm_gmii_reset(sc);
4750
4751 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4752 return 0;
4753 return rc;
4754 }
4755
4756 #define MDI_IO CTRL_SWDPIN(2)
4757 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4758 #define MDI_CLK CTRL_SWDPIN(3)
4759
4760 static void
4761 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4762 {
4763 uint32_t i, v;
4764
4765 v = CSR_READ(sc, WMREG_CTRL);
4766 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4767 v |= MDI_DIR | CTRL_SWDPIO(3);
4768
4769 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4770 if (data & i)
4771 v |= MDI_IO;
4772 else
4773 v &= ~MDI_IO;
4774 CSR_WRITE(sc, WMREG_CTRL, v);
4775 delay(10);
4776 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4777 delay(10);
4778 CSR_WRITE(sc, WMREG_CTRL, v);
4779 delay(10);
4780 }
4781 }
4782
4783 static uint32_t
4784 i82543_mii_recvbits(struct wm_softc *sc)
4785 {
4786 uint32_t v, i, data = 0;
4787
4788 v = CSR_READ(sc, WMREG_CTRL);
4789 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4790 v |= CTRL_SWDPIO(3);
4791
4792 CSR_WRITE(sc, WMREG_CTRL, v);
4793 delay(10);
4794 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4795 delay(10);
4796 CSR_WRITE(sc, WMREG_CTRL, v);
4797 delay(10);
4798
4799 for (i = 0; i < 16; i++) {
4800 data <<= 1;
4801 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4802 delay(10);
4803 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4804 data |= 1;
4805 CSR_WRITE(sc, WMREG_CTRL, v);
4806 delay(10);
4807 }
4808
4809 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4810 delay(10);
4811 CSR_WRITE(sc, WMREG_CTRL, v);
4812 delay(10);
4813
4814 return (data);
4815 }
4816
4817 #undef MDI_IO
4818 #undef MDI_DIR
4819 #undef MDI_CLK
4820
4821 /*
4822 * wm_gmii_i82543_readreg: [mii interface function]
4823 *
4824 * Read a PHY register on the GMII (i82543 version).
4825 */
4826 static int
4827 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4828 {
4829 struct wm_softc *sc = device_private(self);
4830 int rv;
4831
4832 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4833 i82543_mii_sendbits(sc, reg | (phy << 5) |
4834 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4835 rv = i82543_mii_recvbits(sc) & 0xffff;
4836
4837 DPRINTF(WM_DEBUG_GMII,
4838 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4839 device_xname(sc->sc_dev), phy, reg, rv));
4840
4841 return (rv);
4842 }
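
/*
 * Illustrative frame layout for the read above, assuming the standard
 * MII management values MII_COMMAND_START = 01b and MII_COMMAND_READ =
 * 10b: after the 32-bit preamble of 1s, a read of phy 1, reg 2 shifts
 * out the 14-bit frame (01b << 12) | (10b << 10) | (1 << 5) | 2 = 0x1822.
 */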
4843
4844 /*
4845 * wm_gmii_i82543_writereg: [mii interface function]
4846 *
4847 * Write a PHY register on the GMII (i82543 version).
4848 */
4849 static void
4850 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4851 {
4852 struct wm_softc *sc = device_private(self);
4853
4854 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4855 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4856 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4857 (MII_COMMAND_START << 30), 32);
4858 }
4859
4860 /*
4861 * wm_gmii_i82544_readreg: [mii interface function]
4862 *
4863 * Read a PHY register on the GMII.
4864 */
4865 static int
4866 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4867 {
4868 struct wm_softc *sc = device_private(self);
4869 uint32_t mdic = 0;
4870 int i, rv;
4871
4872 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4873 MDIC_REGADD(reg));
4874
4875 for (i = 0; i < 320; i++) {
4876 mdic = CSR_READ(sc, WMREG_MDIC);
4877 if (mdic & MDIC_READY)
4878 break;
4879 delay(10);
4880 }
4881
4882 if ((mdic & MDIC_READY) == 0) {
4883 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4884 device_xname(sc->sc_dev), phy, reg);
4885 rv = 0;
4886 } else if (mdic & MDIC_E) {
4887 #if 0 /* This is normal if no PHY is present. */
4888 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4889 device_xname(sc->sc_dev), phy, reg);
4890 #endif
4891 rv = 0;
4892 } else {
4893 rv = MDIC_DATA(mdic);
4894 if (rv == 0xffff)
4895 rv = 0;
4896 }
4897
4898 return (rv);
4899 }
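
/*
 * The polling loop above allows up to 320 * 10us = 3.2ms for MDIC_READY,
 * far longer than a single MDIO frame takes on a healthy bus.
 */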
4900
4901 /*
4902 * wm_gmii_i82544_writereg: [mii interface function]
4903 *
4904 * Write a PHY register on the GMII.
4905 */
4906 static void
4907 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4908 {
4909 struct wm_softc *sc = device_private(self);
4910 uint32_t mdic = 0;
4911 int i;
4912
4913 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4914 MDIC_REGADD(reg) | MDIC_DATA(val));
4915
4916 for (i = 0; i < 320; i++) {
4917 mdic = CSR_READ(sc, WMREG_MDIC);
4918 if (mdic & MDIC_READY)
4919 break;
4920 delay(10);
4921 }
4922
4923 if ((mdic & MDIC_READY) == 0)
4924 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4925 device_xname(sc->sc_dev), phy, reg);
4926 else if (mdic & MDIC_E)
4927 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4928 device_xname(sc->sc_dev), phy, reg);
4929 }
4930
4931 /*
4932 * wm_gmii_i80003_readreg: [mii interface function]
4933 *
4934  *	Read a PHY register on the Kumeran interface.
4935  * This could be handled by the PHY layer if we didn't have to lock the
4936  * resource ...
4937 */
4938 static int
4939 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4940 {
4941 struct wm_softc *sc = device_private(self);
4942 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4943 int rv;
4944
4945 if (phy != 1) /* only one PHY on kumeran bus */
4946 return 0;
4947
4948 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4949 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4950 __func__);
4951 return 0;
4952 }
4953
4954 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4955 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4956 reg >> GG82563_PAGE_SHIFT);
4957 } else {
4958 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4959 reg >> GG82563_PAGE_SHIFT);
4960 }
4961 	/* Wait another 200us to work around a bug in the MDIC ready bit */
4962 delay(200);
4963 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4964 delay(200);
4965
4966 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4967 return (rv);
4968 }
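
/*
 * Callers encode the PHY page in the upper bits of "reg": assuming the
 * usual 5-bit GG82563 register space (GG82563_PAGE_SHIFT = 5), a request
 * for register 0x1a on page 2 would be reg = (2 << 5) | 0x1a, which the
 * code above splits into a page-select write of 2 and an access to 0x1a.
 */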
4969
4970 /*
4971 * wm_gmii_i80003_writereg: [mii interface function]
4972 *
4973  *	Write a PHY register on the Kumeran interface.
4974  * This could be handled by the PHY layer if we didn't have to lock the
4975  * resource ...
4976 */
4977 static void
4978 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4979 {
4980 struct wm_softc *sc = device_private(self);
4981 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4982
4983 if (phy != 1) /* only one PHY on kumeran bus */
4984 return;
4985
4986 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4987 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4988 __func__);
4989 return;
4990 }
4991
4992 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4993 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4994 reg >> GG82563_PAGE_SHIFT);
4995 } else {
4996 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4997 reg >> GG82563_PAGE_SHIFT);
4998 }
4999 	/* Wait another 200us to work around a bug in the MDIC ready bit */
5000 delay(200);
5001 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5002 delay(200);
5003
5004 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5005 }
5006
5007 /*
5008 * wm_gmii_bm_readreg: [mii interface function]
5009 *
5010  *	Read a PHY register on the BM PHY.
5011  * This could be handled by the PHY layer if we didn't have to lock the
5012  * resource ...
5013 */
5014 static int
5015 wm_gmii_bm_readreg(device_t self, int phy, int reg)
5016 {
5017 struct wm_softc *sc = device_private(self);
5018 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5019 int rv;
5020
5021 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5022 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5023 __func__);
5024 return 0;
5025 }
5026
5027 	if (reg > GG82563_MAX_REG_ADDRESS) {
5028 		if (phy == 1)
5029 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
5030 		else
5031 			wm_gmii_i82544_writereg(self, phy,
5032 			    GG82563_PHY_PAGE_SELECT,
5033 			    reg >> GG82563_PAGE_SHIFT);
5034 	}
5036
5037 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
5038 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5039 return (rv);
5040 }
5041
5042 /*
5043 * wm_gmii_bm_writereg: [mii interface function]
5044 *
5045  *	Write a PHY register on the BM PHY.
5046  * This could be handled by the PHY layer if we didn't have to lock the
5047  * resource ...
5048 */
5049 static void
5050 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
5051 {
5052 struct wm_softc *sc = device_private(self);
5053 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5054
5055 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5056 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5057 __func__);
5058 return;
5059 }
5060
5061 	if (reg > GG82563_MAX_REG_ADDRESS) {
5062 		if (phy == 1)
5063 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
5064 		else
5065 			wm_gmii_i82544_writereg(self, phy,
5066 			    GG82563_PHY_PAGE_SELECT,
5067 			    reg >> GG82563_PAGE_SHIFT);
5068 	}
5070
5071 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
5072 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5073 }
5074
5075 /*
5076 * wm_gmii_statchg: [mii interface function]
5077 *
5078 * Callback from MII layer when media changes.
5079 */
5080 static void
5081 wm_gmii_statchg(device_t self)
5082 {
5083 struct wm_softc *sc = device_private(self);
5084 struct mii_data *mii = &sc->sc_mii;
5085
5086 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5087 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5088 sc->sc_fcrtl &= ~FCRTL_XONE;
5089
5090 /*
5091 * Get flow control negotiation result.
5092 */
5093 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
5094 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
5095 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
5096 mii->mii_media_active &= ~IFM_ETH_FMASK;
5097 }
5098
5099 if (sc->sc_flowflags & IFM_FLOW) {
5100 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
5101 sc->sc_ctrl |= CTRL_TFCE;
5102 sc->sc_fcrtl |= FCRTL_XONE;
5103 }
5104 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
5105 sc->sc_ctrl |= CTRL_RFCE;
5106 }
5107
5108 if (sc->sc_mii.mii_media_active & IFM_FDX) {
5109 DPRINTF(WM_DEBUG_LINK,
5110 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5111 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5112 } else {
5113 DPRINTF(WM_DEBUG_LINK,
5114 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5115 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5116 }
5117
5118 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5119 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5120 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5121 : WMREG_FCRTL, sc->sc_fcrtl);
5122 if (sc->sc_type == WM_T_80003) {
5123 switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5124 case IFM_1000_T:
5125 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5126 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5127 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5128 break;
5129 default:
5130 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5131 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5132 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5133 break;
5134 }
5135 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5136 }
5137 }
5138
5139 /*
5140 * wm_kmrn_readreg:
5141 *
5142 * Read a kumeran register
5143 */
5144 static int
5145 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5146 {
5147 int rv;
5148
5149 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5150 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5151 aprint_error_dev(sc->sc_dev,
5152 "%s: failed to get semaphore\n", __func__);
5153 return 0;
5154 }
5155 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5156 if (wm_get_swfwhw_semaphore(sc)) {
5157 aprint_error_dev(sc->sc_dev,
5158 "%s: failed to get semaphore\n", __func__);
5159 return 0;
5160 }
5161 }
5162
5163 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5164 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5165 KUMCTRLSTA_REN);
5166 delay(2);
5167
5168 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5169
5170 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5171 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5172 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5173 wm_put_swfwhw_semaphore(sc);
5174
5175 return (rv);
5176 }
5177
5178 /*
5179 * wm_kmrn_writereg:
5180 *
5181 * Write a kumeran register
5182 */
5183 static void
5184 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5185 {
5186
5187 	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5188 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5189 aprint_error_dev(sc->sc_dev,
5190 "%s: failed to get semaphore\n", __func__);
5191 return;
5192 }
5193 	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5194 if (wm_get_swfwhw_semaphore(sc)) {
5195 aprint_error_dev(sc->sc_dev,
5196 "%s: failed to get semaphore\n", __func__);
5197 return;
5198 }
5199 }
5200
5201 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5202 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5203 (val & KUMCTRLSTA_MASK));
5204
5205 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5206 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
5207 	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5208 wm_put_swfwhw_semaphore(sc);
5209 }
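
#if 0
/*
 * Illustrative read-modify-write sketch built on the two helpers above
 * (not driver code; "bit" is a placeholder for a real register bit):
 */
static void
wm_kmrn_setbit_example(struct wm_softc *sc, int reg, int bit)
{
	int v;

	v = wm_kmrn_readreg(sc, reg);
	wm_kmrn_writereg(sc, reg, v | bit);
}
#endif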
5210
5211 static int
5212 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5213 {
5214 uint32_t eecd = 0;
5215
5216 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
5217 || sc->sc_type == WM_T_82583) {
5218 eecd = CSR_READ(sc, WMREG_EECD);
5219
5220 /* Isolate bits 15 & 16 */
5221 eecd = ((eecd >> 15) & 0x03);
5222
5223 /* If both bits are set, device is Flash type */
5224 if (eecd == 0x03)
5225 return 0;
5226 }
5227 return 1;
5228 }
5229
5230 static int
5231 wm_get_swsm_semaphore(struct wm_softc *sc)
5232 {
5233 int32_t timeout;
5234 uint32_t swsm;
5235
5236 /* Get the FW semaphore. */
5237 timeout = 1000 + 1; /* XXX */
5238 while (timeout) {
5239 swsm = CSR_READ(sc, WMREG_SWSM);
5240 swsm |= SWSM_SWESMBI;
5241 CSR_WRITE(sc, WMREG_SWSM, swsm);
5242 /* if we managed to set the bit we got the semaphore. */
5243 swsm = CSR_READ(sc, WMREG_SWSM);
5244 if (swsm & SWSM_SWESMBI)
5245 break;
5246
5247 delay(50);
5248 timeout--;
5249 }
5250
5251 if (timeout == 0) {
5252 aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
5253 /* Release semaphores */
5254 wm_put_swsm_semaphore(sc);
5255 return 1;
5256 }
5257 return 0;
5258 }
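
/*
 * The loop above is the usual SWSM test-and-set handshake: software sets
 * SWESMBI and reads it back; the bit only sticks when firmware does not
 * own the semaphore, so a successful read-back means the host won it.
 */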
5259
5260 static void
5261 wm_put_swsm_semaphore(struct wm_softc *sc)
5262 {
5263 uint32_t swsm;
5264
5265 swsm = CSR_READ(sc, WMREG_SWSM);
5266 swsm &= ~(SWSM_SWESMBI);
5267 CSR_WRITE(sc, WMREG_SWSM, swsm);
5268 }
5269
5270 static int
5271 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5272 {
5273 uint32_t swfw_sync;
5274 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5275 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5276 int timeout = 200;
5277
5278 for(timeout = 0; timeout < 200; timeout++) {
5279 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5280 if (wm_get_swsm_semaphore(sc)) {
5281 aprint_error_dev(sc->sc_dev,
5282 "%s: failed to get semaphore\n",
5283 __func__);
5284 return 1;
5285 }
5286 }
5287 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5288 if ((swfw_sync & (swmask | fwmask)) == 0) {
5289 swfw_sync |= swmask;
5290 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5291 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5292 wm_put_swsm_semaphore(sc);
5293 return 0;
5294 }
5295 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5296 wm_put_swsm_semaphore(sc);
5297 delay(5000);
5298 }
5299 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5300 device_xname(sc->sc_dev), mask, swfw_sync);
5301 return 1;
5302 }
5303
5304 static void
5305 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5306 {
5307 uint32_t swfw_sync;
5308
5309 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5310 while (wm_get_swsm_semaphore(sc) != 0)
5311 continue;
5312 }
5313 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5314 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5315 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5316 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5317 wm_put_swsm_semaphore(sc);
5318 }
5319
5320 static int
5321 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5322 {
5323 uint32_t ext_ctrl;
5324 int timeout = 200;
5325
5326 for(timeout = 0; timeout < 200; timeout++) {
5327 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5328 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5329 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5330
5331 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5332 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5333 return 0;
5334 delay(5000);
5335 }
5336 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5337 device_xname(sc->sc_dev), ext_ctrl);
5338 return 1;
5339 }
5340
5341 static void
5342 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5343 {
5344 uint32_t ext_ctrl;
5345 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5346 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5347 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5348 }
5349
5350 static int
5351 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5352 {
5353 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5354 uint8_t bank_high_byte;
5355 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5356
5357 if (sc->sc_type != WM_T_ICH10) {
5358 /* Value of bit 22 corresponds to the flash bank we're on. */
5359 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5360 } else {
5361 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5362 if ((bank_high_byte & 0xc0) == 0x80)
5363 *bank = 0;
5364 else {
5365 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5366 &bank_high_byte);
5367 if ((bank_high_byte & 0xc0) == 0x80)
5368 *bank = 1;
5369 else {
5370 aprint_error_dev(sc->sc_dev,
5371 "EEPROM not present\n");
5372 return -1;
5373 }
5374 }
5375 }
5376
5377 return 0;
5378 }
5379
5380 /******************************************************************************
5381 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5382 * register.
5383 *
5384 * sc - Struct containing variables accessed by shared code
5385 * offset - offset of word in the EEPROM to read
5386 * data - word read from the EEPROM
5387 * words - number of words to read
5388 *****************************************************************************/
5389 static int
5390 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5391 {
5392 int32_t error = 0;
5393 uint32_t flash_bank = 0;
5394 uint32_t act_offset = 0;
5395 uint32_t bank_offset = 0;
5396 uint16_t word = 0;
5397 uint16_t i = 0;
5398
5399 /* We need to know which is the valid flash bank. In the event
5400 * that we didn't allocate eeprom_shadow_ram, we may not be
5401 * managing flash_bank. So it cannot be trusted and needs
5402 * to be updated with each read.
5403 */
5404 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5405 if (error) {
5406 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5407 __func__);
5408 return error;
5409 }
5410
5411 /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
5412 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5413
5414 error = wm_get_swfwhw_semaphore(sc);
5415 if (error) {
5416 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5417 __func__);
5418 return error;
5419 }
5420
5421 for (i = 0; i < words; i++) {
5422 /* The NVM part needs a byte offset, hence * 2 */
5423 act_offset = bank_offset + ((offset + i) * 2);
5424 error = wm_read_ich8_word(sc, act_offset, &word);
5425 if (error) {
5426 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5427 __func__);
5428 break;
5429 }
5430 data[i] = word;
5431 }
5432
5433 wm_put_swfwhw_semaphore(sc);
5434 return error;
5435 }
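
/*
 * Example of the offset math above: with sc_ich8_flash_bank_size of
 * 0x1000 words, reading word 0x10 from bank 1 uses the byte offset
 * act_offset = 0x1000 * 2 + 0x10 * 2 = 0x2020.
 */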
5436
5437 /******************************************************************************
5438 * This function does initial flash setup so that a new read/write/erase cycle
5439 * can be started.
5440 *
5441 * sc - The pointer to the hw structure
5442 ****************************************************************************/
5443 static int32_t
5444 wm_ich8_cycle_init(struct wm_softc *sc)
5445 {
5446 uint16_t hsfsts;
5447 int32_t error = 1;
5448 int32_t i = 0;
5449
5450 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5451
5452 	/* Check the Flash Descriptor Valid bit in the Hw status */
5453 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5454 return error;
5455 }
5456
5457 /* Clear FCERR in Hw status by writing 1 */
5458 /* Clear DAEL in Hw status by writing a 1 */
5459 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5460
5461 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5462
5463 	/* Either we should have a hardware SPI cycle-in-progress bit to check
5464 	 * against in order to start a new cycle, or the FDONE bit should be
5465 	 * changed in the hardware so that it reads 1 after a hardware reset,
5466 	 * which could then be used to indicate whether a cycle is in progress
5467 	 * or has completed. We should also have some software semaphore
5468 	 * mechanism to guard FDONE or the cycle-in-progress bit so that
5469 	 * access by two threads is serialized, or so that two threads don't
5470 	 * start a cycle at the same time. */
5471
5472 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5473 /* There is no cycle running at present, so we can start a cycle */
5474 /* Begin by setting Flash Cycle Done. */
5475 hsfsts |= HSFSTS_DONE;
5476 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5477 error = 0;
5478 } else {
5479 		/* Otherwise poll for some time so the current cycle has a
5480 		 * chance to end before giving up. */
5481 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5482 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5483 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5484 error = 0;
5485 break;
5486 }
5487 delay(1);
5488 }
5489 if (error == 0) {
5490 			/* Successfully waited for the previous cycle to
5491 			 * finish; now set the Flash Cycle Done. */
5492 hsfsts |= HSFSTS_DONE;
5493 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5494 }
5495 }
5496 return error;
5497 }
5498
5499 /******************************************************************************
5500 * This function starts a flash cycle and waits for its completion
5501 *
5502 * sc - The pointer to the hw structure
5503 ****************************************************************************/
5504 static int32_t
5505 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5506 {
5507 uint16_t hsflctl;
5508 uint16_t hsfsts;
5509 int32_t error = 1;
5510 uint32_t i = 0;
5511
5512 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5513 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5514 hsflctl |= HSFCTL_GO;
5515 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5516
5517 /* wait till FDONE bit is set to 1 */
5518 do {
5519 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5520 if (hsfsts & HSFSTS_DONE)
5521 break;
5522 delay(1);
5523 i++;
5524 } while (i < timeout);
5525 if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0) {
5526 error = 0;
5527 }
5528 return error;
5529 }
5530
5531 /******************************************************************************
5532 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5533 *
5534 * sc - The pointer to the hw structure
5535 * index - The index of the byte or word to read.
5536 * size - Size of data to read, 1=byte 2=word
5537 * data - Pointer to the word to store the value read.
5538 *****************************************************************************/
5539 static int32_t
5540 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5541 uint32_t size, uint16_t* data)
5542 {
5543 uint16_t hsfsts;
5544 uint16_t hsflctl;
5545 uint32_t flash_linear_address;
5546 uint32_t flash_data = 0;
5547 int32_t error = 1;
5548 int32_t count = 0;
5549
5550 	if (size < 1 || size > 2 || data == NULL ||
5551 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
5552 return error;
5553
5554 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5555 sc->sc_ich8_flash_base;
5556
5557 do {
5558 delay(1);
5559 /* Steps */
5560 error = wm_ich8_cycle_init(sc);
5561 if (error)
5562 break;
5563
5564 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5565 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5566 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5567 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5568 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5569
5570 /* Write the last 24 bits of index into Flash Linear address field in
5571 * Flash Address */
5572 /* TODO: TBD maybe check the index against the size of flash */
5573
5574 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5575
5576 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5577
5578 		/* If FCERR is set, clear it and retry the whole sequence a
5579 		 * few more times; otherwise read in (shift in) the Flash
5580 		 * Data0 register, least significant byte first. */
5581 if (error == 0) {
5582 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5583 if (size == 1) {
5584 *data = (uint8_t)(flash_data & 0x000000FF);
5585 } else if (size == 2) {
5586 *data = (uint16_t)(flash_data & 0x0000FFFF);
5587 }
5588 break;
5589 } else {
5590 /* If we've gotten here, then things are probably completely hosed,
5591 * but if the error condition is detected, it won't hurt to give
5592 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5593 */
5594 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5595 if (hsfsts & HSFSTS_ERR) {
5596 /* Repeat for some time before giving up. */
5597 continue;
5598 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5599 break;
5600 }
5601 }
5602 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5603
5604 return error;
5605 }
5606
5607 /******************************************************************************
5608 * Reads a single byte from the NVM using the ICH8 flash access registers.
5609 *
5610 * sc - pointer to wm_hw structure
5611 * index - The index of the byte to read.
5612 * data - Pointer to a byte to store the value read.
5613 *****************************************************************************/
5614 static int32_t
5615 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5616 {
5617 int32_t status;
5618 uint16_t word = 0;
5619
5620 status = wm_read_ich8_data(sc, index, 1, &word);
5621 if (status == 0) {
5622 *data = (uint8_t)word;
5623 }
5624
5625 return status;
5626 }
5627
5628 /******************************************************************************
5629 * Reads a word from the NVM using the ICH8 flash access registers.
5630 *
5631 * sc - pointer to wm_hw structure
5632 * index - The starting byte index of the word to read.
5633 * data - Pointer to a word to store the value read.
5634 *****************************************************************************/
5635 static int32_t
5636 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5637 {
5638 int32_t status;
5639
5640 status = wm_read_ich8_data(sc, index, 2, data);
5641 return status;
5642 }
5643
5644 static int
5645 wm_check_mng_mode(struct wm_softc *sc)
5646 {
5647 int rv;
5648
5649 switch (sc->sc_type) {
5650 case WM_T_ICH8:
5651 case WM_T_ICH9:
5652 case WM_T_ICH10:
5653 rv = wm_check_mng_mode_ich8lan(sc);
5654 break;
5655 case WM_T_82574:
5656 case WM_T_82583:
5657 rv = wm_check_mng_mode_82574(sc);
5658 break;
5659 case WM_T_82571:
5660 case WM_T_82572:
5661 case WM_T_82573:
5662 case WM_T_80003:
5663 rv = wm_check_mng_mode_generic(sc);
5664 break;
5665 default:
5666 		/* nothing to do */
5667 rv = 0;
5668 break;
5669 }
5670
5671 return rv;
5672 }
5673
5674 static int
5675 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5676 {
5677 uint32_t fwsm;
5678
5679 fwsm = CSR_READ(sc, WMREG_FWSM);
5680
5681 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5682 return 1;
5683
5684 return 0;
5685 }
5686
5687 static int
5688 wm_check_mng_mode_82574(struct wm_softc *sc)
5689 {
5690 uint16_t data;
5691
5692 wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5693
5694 if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5695 return 1;
5696
5697 return 0;
5698 }
5699
5700 static int
5701 wm_check_mng_mode_generic(struct wm_softc *sc)
5702 {
5703 uint32_t fwsm;
5704
5705 fwsm = CSR_READ(sc, WMREG_FWSM);
5706
5707 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5708 return 1;
5709
5710 return 0;
5711 }
5712
5713 static void
5714 wm_get_hw_control(struct wm_softc *sc)
5715 {
5716 uint32_t reg;
5717
5718 switch (sc->sc_type) {
5719 case WM_T_82573:
5720 #if 0
5721 case WM_T_82574:
5722 case WM_T_82583:
5723 /*
5724 	 * FreeBSD's em driver has a function to check the management
5725 	 * mode for the 82574, but it's not used. Why?
5726 */
5727 #endif
5728 reg = CSR_READ(sc, WMREG_SWSM);
5729 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5730 break;
5731 case WM_T_82571:
5732 case WM_T_82572:
5733 case WM_T_80003:
5734 case WM_T_ICH8:
5735 case WM_T_ICH9:
5736 case WM_T_ICH10:
5737 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5738 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5739 break;
5740 default:
5741 break;
5742 }
5743 }
5744
5745 /* XXX Currently TBI only */
5746 static int
5747 wm_check_for_link(struct wm_softc *sc)
5748 {
5749 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5750 uint32_t rxcw;
5751 uint32_t ctrl;
5752 uint32_t status;
5753 uint32_t sig;
5754
5755 rxcw = CSR_READ(sc, WMREG_RXCW);
5756 ctrl = CSR_READ(sc, WMREG_CTRL);
5757 status = CSR_READ(sc, WMREG_STATUS);
5758
5759 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5760
5761 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5762 device_xname(sc->sc_dev), __func__,
5763 ((ctrl & CTRL_SWDPIN(1)) == sig),
5764 ((status & STATUS_LU) != 0),
5765 ((rxcw & RXCW_C) != 0)
5766 ));
5767
5768 /*
5769 * SWDPIN LU RXCW
5770 * 0 0 0
5771 * 0 0 1 (should not happen)
5772 * 0 1 0 (should not happen)
5773 * 0 1 1 (should not happen)
5774 * 1 0 0 Disable autonego and force linkup
5775 * 1 0 1 got /C/ but not linkup yet
5776 * 1 1 0 (linkup)
5777 * 1 1 1 If IFM_AUTO, back to autonego
5778 *
5779 */
5780 if (((ctrl & CTRL_SWDPIN(1)) == sig)
5781 && ((status & STATUS_LU) == 0)
5782 && ((rxcw & RXCW_C) == 0)) {
5783 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5784 __func__));
5785 sc->sc_tbi_linkup = 0;
5786 /* Disable auto-negotiation in the TXCW register */
5787 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5788
5789 /*
5790 * Force link-up and also force full-duplex.
5791 *
5792 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
5793 		 * automatically, so keep sc->sc_ctrl in sync with the register
5794 */
5795 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5796 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5797 } else if(((status & STATUS_LU) != 0)
5798 && ((rxcw & RXCW_C) != 0)
5799 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5800 sc->sc_tbi_linkup = 1;
5801 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5802 __func__));
5803 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5804 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5805 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5806 && ((rxcw & RXCW_C) != 0)) {
5807 DPRINTF(WM_DEBUG_LINK, ("/C/"));
5808 } else {
5809 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5810 status));
5811 }
5812
5813 return 0;
5814 }
5815