/*	$NetBSD: if_wm.c,v 1.182 2009/12/16 04:50:35 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.182 2009/12/16 04:50:35 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
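
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * Because the ring sizes above are powers of two, the WM_NEXTTX()
 * macro wraps the index with a mask instead of a modulo.  The
 * function below (a hypothetical example, not driver API) shows the
 * arithmetic for the 4096-descriptor case.
 */
static void
wm_ring_index_example(struct wm_softc *sc)
{
	int idx = WM_NTXDESC(sc) - 1;	/* last slot, e.g. 4095 */

	idx = WM_NEXTTX(sc, idx);	/* (4095 + 1) & 0xfff == 0 */
	KASSERT(idx == 0);		/* wrapped back to slot 0 */
}
#endif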

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
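
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * How the Rx chain macros above are intended to compose (the function
 * is a hypothetical example, not driver API): sc_rxtailp always points
 * at the m_next slot of the last mbuf, so appending is O(1) and no
 * "is the list empty?" special case is needed.
 */
static void
wm_rxchain_example(struct wm_softc *sc, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, len == 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == sc_rxtail == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 == sc_rxtail */
}
#endif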

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
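
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * The posted-write flush pattern (a hypothetical example, not driver
 * API): PCI writes may be posted, so a write that must reach the chip
 * before the CPU continues is followed by a read from the same device.
 * CSR_WRITE_FLUSH() reads WMREG_STATUS for exactly that purpose.
 */
static void
wm_csr_flush_example(struct wm_softc *sc)
{
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* e.g. mask interrupts */
	CSR_WRITE_FLUSH(sc);			/* force the write to land */
}
#endif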

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
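
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * Why WM_CDTXSYNC() may issue two bus_dmamap_sync() calls (a
 * hypothetical example, not driver API): one packet's descriptors can
 * straddle the end of the ring.  With e.g. 256 descriptors (the 82542
 * configuration), syncing 4 of them starting at index 254 covers
 * slots [254, 255] first and then the wrapped remainder [0, 1].
 */
static void
wm_cdtxsync_example(struct wm_softc *sc)
{
	WM_CDTXSYNC(sc, 254, 4, BUS_DMASYNC_PREWRITE);
}
#endif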

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
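
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * The "align tweak" arithmetic described above (a hypothetical
 * example, not driver API): with sc_align_tweak == 2, the 14-byte
 * Ethernet header ends at offset 2 + 14 == 16 within the cluster, so
 * the IP header that follows starts on a 4-byte boundary.
 */
static void
wm_align_tweak_example(struct wm_softc *sc, struct mbuf *m)
{
	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
	/* 2 (tweak) + ETHER_HDR_LEN (14) == 16, a multiple of 4 */
}
#endif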

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
#if 0
static int	wm_check_mng_mode_82574(struct wm_softc *);
#endif
static int	wm_check_mng_mode_generic(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);


/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
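
#if 0	/* Illustrative sketch only; not part of the driver. */
/*
 * The indirect I/O access pattern used by wm_io_read()/wm_io_write()
 * above (this function is a hypothetical example, not driver API, and
 * wm_io_read() itself is compiled out): the I/O BAR exposes an address
 * window at offset 0 and a data window at offset 4, so each access
 * banks the target register number in first and then moves the data
 * through the window.
 */
static uint32_t
wm_io_example(struct wm_softc *sc)
{
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);	/* write via window */
	return (wm_io_read(sc, WMREG_CTRL));		/* read it back */
}
#endif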

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/* Set device properties */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX,
				&sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL,
		    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
		    (void **)&sc->sc_control_data,
		    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
		    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, cdata_size, NULL,
		    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
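		/*
		 * Worked example (illustrative numbers only, not from real
		 * hardware): if GFPREG read as 0x00200010, the flash base
		 * would be sector 0x10 and the limit sector 0x20, so the
		 * bank would span 0x11 sectors.  Scaling by
		 * ICH_FLASH_SECTOR_SIZE gives bytes; halving accounts for
		 * the two NVM banks, and dividing by sizeof(uint16_t)
		 * converts bytes into the 16-bit words the EEPROM read
		 * routines traffic in.
		 */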
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type == WM_T_82574)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Try the check a second time, because some PCI-e parts
		 * fail the first read while the link is in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, unless it was
	 * already found in the device properties.
	 */
	ea = prop_dictionary_get(dict, "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(dict, "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(dict, "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(dict, "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
	    sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on i82543 and later.
	 */
1560 if (sc->sc_type >= WM_T_82543) {
1561 ifp->if_capabilities |=
1562 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1563 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1564 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1565 IFCAP_CSUM_TCPv6_Tx |
1566 IFCAP_CSUM_UDPv6_Tx;
1567 }
1568
1569 /*
1570 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1571 *
1572 * 82541GI (8086:1076) ... no
1573 * 82572EI (8086:10b9) ... yes
1574 */
1575 if (sc->sc_type >= WM_T_82571) {
1576 ifp->if_capabilities |=
1577 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1578 }
1579
1580 /*
1581 * If we're a i82544 or greater (except i82547), we can do
1582 * TCP segmentation offload.
1583 */
1584 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1585 ifp->if_capabilities |= IFCAP_TSOv4;
1586 }
1587
1588 if (sc->sc_type >= WM_T_82571) {
1589 ifp->if_capabilities |= IFCAP_TSOv6;
1590 }
1591
1592 /*
1593 * Attach the interface.
1594 */
1595 if_attach(ifp);
1596 ether_ifattach(ifp, enaddr);
1597 #if NRND > 0
1598 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1599 #endif
1600
1601 #ifdef WM_EVENT_COUNTERS
1602 /* Attach event counters. */
1603 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1604 NULL, xname, "txsstall");
1605 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1606 NULL, xname, "txdstall");
1607 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1608 NULL, xname, "txfifo_stall");
1609 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1610 NULL, xname, "txdw");
1611 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1612 NULL, xname, "txqe");
1613 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1614 NULL, xname, "rxintr");
1615 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1616 NULL, xname, "linkintr");
1617
1618 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1619 NULL, xname, "rxipsum");
1620 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1621 NULL, xname, "rxtusum");
1622 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1623 NULL, xname, "txipsum");
1624 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1625 NULL, xname, "txtusum");
1626 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1627 NULL, xname, "txtusum6");
1628
1629 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1630 NULL, xname, "txtso");
1631 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1632 NULL, xname, "txtso6");
1633 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1634 NULL, xname, "txtsopain");
1635
1636 for (i = 0; i < WM_NTXSEGS; i++) {
1637 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1638 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1639 NULL, xname, wm_txseg_evcnt_names[i]);
1640 }
1641
1642 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1643 NULL, xname, "txdrop");
1644
1645 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1646 NULL, xname, "tu");
1647
1648 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1649 NULL, xname, "tx_xoff");
1650 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1651 NULL, xname, "tx_xon");
1652 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1653 NULL, xname, "rx_xoff");
1654 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1655 NULL, xname, "rx_xon");
1656 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1657 NULL, xname, "rx_macctl");
1658 #endif /* WM_EVENT_COUNTERS */
1659
1660 if (pmf_device_register(self, NULL, NULL))
1661 pmf_class_network_register(self, ifp);
1662 else
1663 aprint_error_dev(self, "couldn't establish power handler\n");
1664
1665 return;
1666
1667 /*
1668 * Free any resources we've allocated during the failed attach
1669 * attempt. Do this in reverse order and fall through.
1670 */
1671 fail_5:
1672 for (i = 0; i < WM_NRXDESC; i++) {
1673 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1674 bus_dmamap_destroy(sc->sc_dmat,
1675 sc->sc_rxsoft[i].rxs_dmamap);
1676 }
1677 fail_4:
1678 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1679 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1680 bus_dmamap_destroy(sc->sc_dmat,
1681 sc->sc_txsoft[i].txs_dmamap);
1682 }
1683 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1684 fail_3:
1685 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1686 fail_2:
1687 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1688 cdata_size);
1689 fail_1:
1690 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1691 fail_0:
1692 return;
1693 }
1694
1695 /*
1696 * wm_tx_offload:
1697 *
1698 * Set up TCP/IP checksumming parameters for the
1699 * specified packet.
1700 */
1701 static int
1702 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1703 uint8_t *fieldsp)
1704 {
1705 struct mbuf *m0 = txs->txs_mbuf;
1706 struct livengood_tcpip_ctxdesc *t;
1707 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1708 uint32_t ipcse;
1709 struct ether_header *eh;
1710 int offset, iphl;
1711 uint8_t fields;
1712
1713 /*
1714 * XXX It would be nice if the mbuf pkthdr had offset
1715 * fields for the protocol headers.
1716 */
1717
1718 eh = mtod(m0, struct ether_header *);
1719 switch (htons(eh->ether_type)) {
1720 case ETHERTYPE_IP:
1721 case ETHERTYPE_IPV6:
1722 offset = ETHER_HDR_LEN;
1723 break;
1724
1725 case ETHERTYPE_VLAN:
1726 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1727 break;
1728
1729 default:
1730 /*
1731 * Don't support this protocol or encapsulation.
1732 */
1733 *fieldsp = 0;
1734 *cmdp = 0;
1735 return (0);
1736 }
1737
1738 if ((m0->m_pkthdr.csum_flags &
1739 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1740 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1741 } else {
1742 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1743 }
1744 ipcse = offset + iphl - 1;
1745
1746 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1747 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1748 seg = 0;
1749 fields = 0;
1750
1751 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1752 int hlen = offset + iphl;
1753 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1754
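		/*
		 * For TSO the chip regenerates the IP length field and the
		 * TCP checksum for each segment it emits, so here we zero
		 * ip_len/ip6_plen and seed th_sum with a pseudo-header
		 * checksum that deliberately omits the length.
		 */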
1755 if (__predict_false(m0->m_len <
1756 (hlen + sizeof(struct tcphdr)))) {
1757 /*
1758 * TCP/IP headers are not in the first mbuf; we need
1759 * to do this the slow and painful way. Let's just
1760 * hope this doesn't happen very often.
1761 */
1762 struct tcphdr th;
1763
1764 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1765
1766 m_copydata(m0, hlen, sizeof(th), &th);
1767 if (v4) {
1768 struct ip ip;
1769
1770 m_copydata(m0, offset, sizeof(ip), &ip);
1771 ip.ip_len = 0;
1772 m_copyback(m0,
1773 offset + offsetof(struct ip, ip_len),
1774 sizeof(ip.ip_len), &ip.ip_len);
1775 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1776 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1777 } else {
1778 struct ip6_hdr ip6;
1779
1780 m_copydata(m0, offset, sizeof(ip6), &ip6);
1781 ip6.ip6_plen = 0;
1782 m_copyback(m0,
1783 offset + offsetof(struct ip6_hdr, ip6_plen),
1784 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1785 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1786 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1787 }
1788 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1789 sizeof(th.th_sum), &th.th_sum);
1790
1791 hlen += th.th_off << 2;
1792 } else {
1793 /*
1794 * TCP/IP headers are in the first mbuf; we can do
1795 * this the easy way.
1796 */
1797 struct tcphdr *th;
1798
1799 if (v4) {
1800 struct ip *ip =
1801 (void *)(mtod(m0, char *) + offset);
1802 th = (void *)(mtod(m0, char *) + hlen);
1803
1804 ip->ip_len = 0;
1805 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1806 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1807 } else {
1808 struct ip6_hdr *ip6 =
1809 (void *)(mtod(m0, char *) + offset);
1810 th = (void *)(mtod(m0, char *) + hlen);
1811
1812 ip6->ip6_plen = 0;
1813 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1814 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1815 }
1816 hlen += th->th_off << 2;
1817 }
1818
1819 if (v4) {
1820 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1821 cmdlen |= WTX_TCPIP_CMD_IP;
1822 } else {
1823 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1824 ipcse = 0;
1825 }
1826 cmd |= WTX_TCPIP_CMD_TSE;
1827 cmdlen |= WTX_TCPIP_CMD_TSE |
1828 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1829 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1830 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1831 }
1832
1833 /*
1834 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1835 * offload feature, if we load the context descriptor, we
1836 * MUST provide valid values for IPCSS and TUCSS fields.
1837 */
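	/*
	 * Worked example (untagged IPv4 TCP, 20-byte IP header):
	 * offset = 14 and iphl = 20, so IPCSS = 14, IPCSO = 14 +
	 * offsetof(struct ip, ip_sum) = 24, IPCSE = ipcse = 33, and
	 * the default TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.
	 */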
1838
1839 ipcs = WTX_TCPIP_IPCSS(offset) |
1840 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1841 WTX_TCPIP_IPCSE(ipcse);
1842 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1843 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1844 fields |= WTX_IXSM;
1845 }
1846
1847 offset += iphl;
1848
1849 if (m0->m_pkthdr.csum_flags &
1850 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1851 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1852 fields |= WTX_TXSM;
1853 tucs = WTX_TCPIP_TUCSS(offset) |
1854 WTX_TCPIP_TUCSO(offset +
1855 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1856 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1857 } else if ((m0->m_pkthdr.csum_flags &
1858 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1859 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1860 fields |= WTX_TXSM;
1861 tucs = WTX_TCPIP_TUCSS(offset) |
1862 WTX_TCPIP_TUCSO(offset +
1863 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1864 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1865 } else {
1866 /* Just initialize it to a valid TCP context. */
1867 tucs = WTX_TCPIP_TUCSS(offset) |
1868 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1869 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1870 }
1871
1872 /* Fill in the context descriptor. */
1873 t = (struct livengood_tcpip_ctxdesc *)
1874 &sc->sc_txdescs[sc->sc_txnext];
1875 t->tcpip_ipcs = htole32(ipcs);
1876 t->tcpip_tucs = htole32(tucs);
1877 t->tcpip_cmdlen = htole32(cmdlen);
1878 t->tcpip_seg = htole32(seg);
1879 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1880
1881 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1882 txs->txs_ndesc++;
1883
1884 *cmdp = cmd;
1885 *fieldsp = fields;
1886
1887 return (0);
1888 }
1889
1890 static void
1891 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1892 {
1893 struct mbuf *m;
1894 int i;
1895
1896 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1897 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1898 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1899 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1900 m->m_data, m->m_len, m->m_flags);
1901 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1902 i, i == 1 ? "" : "s");
1903 }
1904
1905 /*
1906 * wm_82547_txfifo_stall:
1907 *
1908 * Callout used to wait for the 82547 Tx FIFO to drain,
1909 * reset the FIFO pointers, and restart packet transmission.
1910 */
1911 static void
1912 wm_82547_txfifo_stall(void *arg)
1913 {
1914 struct wm_softc *sc = arg;
1915 int s;
1916
1917 s = splnet();
1918
1919 if (sc->sc_txfifo_stall) {
1920 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1921 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1922 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1923 /*
1924 * Packets have drained. Stop transmitter, reset
1925 * FIFO pointers, restart transmitter, and kick
1926 * the packet queue.
1927 */
1928 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1929 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1930 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1931 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1932 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1933 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1934 CSR_WRITE(sc, WMREG_TCTL, tctl);
1935 CSR_WRITE_FLUSH(sc);
1936
1937 sc->sc_txfifo_head = 0;
1938 sc->sc_txfifo_stall = 0;
1939 wm_start(&sc->sc_ethercom.ec_if);
1940 } else {
1941 /*
1942 * Still waiting for packets to drain; try again in
1943 * another tick.
1944 */
1945 callout_schedule(&sc->sc_txfifo_ch, 1);
1946 }
1947 }
1948
1949 splx(s);
1950 }
1951
1952 /*
1953 * wm_82547_txfifo_bugchk:
1954 *
1955 * Check for bug condition in the 82547 Tx FIFO. We need to
1956 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
1958 *
1959 * We do this by checking the amount of space before the end
1960 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1961 * the Tx FIFO, wait for all remaining packets to drain, reset
1962 * the internal FIFO pointers to the beginning, and restart
1963 * transmission on the interface.
1964 */
1965 #define WM_FIFO_HDR 0x10
1966 #define WM_82547_PAD_LEN 0x3e0
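/*
 * Worked example of the check below: a 1514-byte frame occupies
 * roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes of FIFO,
 * so it triggers a stall only when space <= 1536 - 0x3e0 = 0x220
 * bytes remain before the wrap point.
 */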
1967 static int
1968 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1969 {
1970 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1971 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1972
1973 /* Just return if already stalled. */
1974 if (sc->sc_txfifo_stall)
1975 return (1);
1976
1977 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1978 /* Stall only occurs in half-duplex mode. */
1979 goto send_packet;
1980 }
1981
1982 if (len >= WM_82547_PAD_LEN + space) {
1983 sc->sc_txfifo_stall = 1;
1984 callout_schedule(&sc->sc_txfifo_ch, 1);
1985 return (1);
1986 }
1987
1988 send_packet:
1989 sc->sc_txfifo_head += len;
1990 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1991 sc->sc_txfifo_head -= sc->sc_txfifo_size;
1992
1993 return (0);
1994 }
1995
1996 /*
1997 * wm_start: [ifnet interface function]
1998 *
1999 * Start packet transmission on the interface.
2000 */
2001 static void
2002 wm_start(struct ifnet *ifp)
2003 {
2004 struct wm_softc *sc = ifp->if_softc;
2005 struct mbuf *m0;
2006 struct m_tag *mtag;
2007 struct wm_txsoft *txs;
2008 bus_dmamap_t dmamap;
2009 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2010 bus_addr_t curaddr;
2011 bus_size_t seglen, curlen;
2012 uint32_t cksumcmd;
2013 uint8_t cksumfields;
2014
2015 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2016 return;
2017
2018 /*
2019 * Remember the previous number of free descriptors.
2020 */
2021 ofree = sc->sc_txfree;
2022
2023 /*
2024 * Loop through the send queue, setting up transmit descriptors
2025 * until we drain the queue, or use up all available transmit
2026 * descriptors.
2027 */
2028 for (;;) {
2029 /* Grab a packet off the queue. */
2030 IFQ_POLL(&ifp->if_snd, m0);
2031 if (m0 == NULL)
2032 break;
2033
2034 DPRINTF(WM_DEBUG_TX,
2035 ("%s: TX: have packet to transmit: %p\n",
2036 device_xname(sc->sc_dev), m0));
2037
2038 /* Get a work queue entry. */
2039 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2040 wm_txintr(sc);
2041 if (sc->sc_txsfree == 0) {
2042 DPRINTF(WM_DEBUG_TX,
2043 ("%s: TX: no free job descriptors\n",
2044 device_xname(sc->sc_dev)));
2045 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2046 break;
2047 }
2048 }
2049
2050 txs = &sc->sc_txsoft[sc->sc_txsnext];
2051 dmamap = txs->txs_dmamap;
2052
2053 use_tso = (m0->m_pkthdr.csum_flags &
2054 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2055
2056 /*
2057 * So says the Linux driver:
2058 * The controller does a simple calculation to make sure
2059 * there is enough room in the FIFO before initiating the
2060 * DMA for each buffer. The calc is:
2061 * 4 = ceil(buffer len / MSS)
2062 * To make sure we don't overrun the FIFO, adjust the max
2063 * buffer len if the MSS drops.
2064 */
2065 dmamap->dm_maxsegsz =
2066 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2067 ? m0->m_pkthdr.segsz << 2
2068 : WTX_MAX_LEN;
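		/*
		 * E.g. with a typical MSS of 1460 each DMA segment is
		 * clamped to 4 * 1460 = 5840 bytes (assuming that is
		 * below WTX_MAX_LEN).
		 */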
2069
2070 /*
2071 * Load the DMA map. If this fails, the packet either
2072 * didn't fit in the allotted number of segments, or we
2073 * were short on resources. For the too-many-segments
2074 * case, we simply report an error and drop the packet,
2075 * since we can't sanely copy a jumbo packet to a single
2076 * buffer.
2077 */
2078 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2079 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2080 if (error) {
2081 if (error == EFBIG) {
2082 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2083 log(LOG_ERR, "%s: Tx packet consumes too many "
2084 "DMA segments, dropping...\n",
2085 device_xname(sc->sc_dev));
2086 IFQ_DEQUEUE(&ifp->if_snd, m0);
2087 wm_dump_mbuf_chain(sc, m0);
2088 m_freem(m0);
2089 continue;
2090 }
2091 /*
2092 * Short on resources, just stop for now.
2093 */
2094 DPRINTF(WM_DEBUG_TX,
2095 ("%s: TX: dmamap load failed: %d\n",
2096 device_xname(sc->sc_dev), error));
2097 break;
2098 }
2099
2100 segs_needed = dmamap->dm_nsegs;
2101 if (use_tso) {
2102 /* For sentinel descriptor; see below. */
2103 segs_needed++;
2104 }
2105
2106 /*
2107 * Ensure we have enough descriptors free to describe
2108 * the packet. Note, we always reserve one descriptor
2109 * at the end of the ring due to the semantics of the
2110 * TDT register, plus one more in the event we need
2111 * to load offload context.
2112 */
2113 if (segs_needed > sc->sc_txfree - 2) {
2114 /*
2115 * Not enough free descriptors to transmit this
2116 * packet. We haven't committed anything yet,
2117 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
2119 * layer that there are no more slots left.
2120 */
2121 DPRINTF(WM_DEBUG_TX,
2122 ("%s: TX: need %d (%d) descriptors, have %d\n",
2123 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2124 segs_needed, sc->sc_txfree - 1));
2125 ifp->if_flags |= IFF_OACTIVE;
2126 bus_dmamap_unload(sc->sc_dmat, dmamap);
2127 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2128 break;
2129 }
2130
2131 /*
2132 * Check for 82547 Tx FIFO bug. We need to do this
2133 * once we know we can transmit the packet, since we
2134 * do some internal FIFO space accounting here.
2135 */
2136 if (sc->sc_type == WM_T_82547 &&
2137 wm_82547_txfifo_bugchk(sc, m0)) {
2138 DPRINTF(WM_DEBUG_TX,
2139 ("%s: TX: 82547 Tx FIFO bug detected\n",
2140 device_xname(sc->sc_dev)));
2141 ifp->if_flags |= IFF_OACTIVE;
2142 bus_dmamap_unload(sc->sc_dmat, dmamap);
2143 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2144 break;
2145 }
2146
2147 IFQ_DEQUEUE(&ifp->if_snd, m0);
2148
2149 /*
2150 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2151 */
2152
2153 DPRINTF(WM_DEBUG_TX,
2154 ("%s: TX: packet has %d (%d) DMA segments\n",
2155 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2156
2157 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2158
2159 /*
2160 * Store a pointer to the packet so that we can free it
2161 * later.
2162 *
2163 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
2165 * incremented by 1 if we do checksum offload (a descriptor
2166 * is used to set the checksum context).
2167 */
2168 txs->txs_mbuf = m0;
2169 txs->txs_firstdesc = sc->sc_txnext;
2170 txs->txs_ndesc = segs_needed;
2171
2172 /* Set up offload parameters for this packet. */
2173 if (m0->m_pkthdr.csum_flags &
2174 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2175 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2176 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2177 if (wm_tx_offload(sc, txs, &cksumcmd,
2178 &cksumfields) != 0) {
2179 /* Error message already displayed. */
2180 bus_dmamap_unload(sc->sc_dmat, dmamap);
2181 continue;
2182 }
2183 } else {
2184 cksumcmd = 0;
2185 cksumfields = 0;
2186 }
2187
2188 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2189
2190 /* Sync the DMA map. */
2191 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2192 BUS_DMASYNC_PREWRITE);
2193
2194 /*
2195 * Initialize the transmit descriptor.
2196 */
2197 for (nexttx = sc->sc_txnext, seg = 0;
2198 seg < dmamap->dm_nsegs; seg++) {
2199 for (seglen = dmamap->dm_segs[seg].ds_len,
2200 curaddr = dmamap->dm_segs[seg].ds_addr;
2201 seglen != 0;
2202 curaddr += curlen, seglen -= curlen,
2203 nexttx = WM_NEXTTX(sc, nexttx)) {
2204 curlen = seglen;
2205
2206 /*
2207 * So says the Linux driver:
				 * Workaround for premature descriptor
2209 * write-backs in TSO mode. Append a
2210 * 4-byte sentinel descriptor.
2211 */
2212 if (use_tso &&
2213 seg == dmamap->dm_nsegs - 1 &&
2214 curlen > 8)
2215 curlen -= 4;
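				/*
				 * The 4 bytes trimmed here become a
				 * final 4-byte descriptor on the next
				 * loop iteration; the extra slot was
				 * reserved when segs_needed was
				 * incremented above.
				 */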
2216
2217 wm_set_dma_addr(
2218 &sc->sc_txdescs[nexttx].wtx_addr,
2219 curaddr);
2220 sc->sc_txdescs[nexttx].wtx_cmdlen =
2221 htole32(cksumcmd | curlen);
2222 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2223 0;
2224 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2225 cksumfields;
2226 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2227 lasttx = nexttx;
2228
2229 DPRINTF(WM_DEBUG_TX,
2230 ("%s: TX: desc %d: low 0x%08lx, "
2231 "len 0x%04x\n",
2232 device_xname(sc->sc_dev), nexttx,
2233 curaddr & 0xffffffffUL, (unsigned)curlen));
2234 }
2235 }
2236
2237 KASSERT(lasttx != -1);
2238
2239 /*
2240 * Set up the command byte on the last descriptor of
2241 * the packet. If we're in the interrupt delay window,
2242 * delay the interrupt.
2243 */
2244 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2245 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2246
2247 /*
2248 * If VLANs are enabled and the packet has a VLAN tag, set
2249 * up the descriptor to encapsulate the packet for us.
2250 *
2251 * This is only valid on the last descriptor of the packet.
2252 */
2253 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2254 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2255 htole32(WTX_CMD_VLE);
2256 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2257 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2258 }
2259
2260 txs->txs_lastdesc = lasttx;
2261
2262 DPRINTF(WM_DEBUG_TX,
2263 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2264 device_xname(sc->sc_dev),
2265 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2266
2267 /* Sync the descriptors we're using. */
2268 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2269 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2270
2271 /* Give the packet to the chip. */
2272 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2273
2274 DPRINTF(WM_DEBUG_TX,
2275 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2276
2277 DPRINTF(WM_DEBUG_TX,
2278 ("%s: TX: finished transmitting packet, job %d\n",
2279 device_xname(sc->sc_dev), sc->sc_txsnext));
2280
2281 /* Advance the tx pointer. */
2282 sc->sc_txfree -= txs->txs_ndesc;
2283 sc->sc_txnext = nexttx;
2284
2285 sc->sc_txsfree--;
2286 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2287
2288 #if NBPFILTER > 0
2289 /* Pass the packet to any BPF listeners. */
2290 if (ifp->if_bpf)
2291 bpf_mtap(ifp->if_bpf, m0);
2292 #endif /* NBPFILTER > 0 */
2293 }
2294
2295 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2296 /* No more slots; notify upper layer. */
2297 ifp->if_flags |= IFF_OACTIVE;
2298 }
2299
2300 if (sc->sc_txfree != ofree) {
2301 /* Set a watchdog timer in case the chip flakes out. */
2302 ifp->if_timer = 5;
2303 }
2304 }
2305
2306 /*
2307 * wm_watchdog: [ifnet interface function]
2308 *
2309 * Watchdog timer handler.
2310 */
2311 static void
2312 wm_watchdog(struct ifnet *ifp)
2313 {
2314 struct wm_softc *sc = ifp->if_softc;
2315
2316 /*
2317 * Since we're using delayed interrupts, sweep up
2318 * before we report an error.
2319 */
2320 wm_txintr(sc);
2321
2322 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2323 log(LOG_ERR,
2324 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2325 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2326 sc->sc_txnext);
2327 ifp->if_oerrors++;
2328
2329 /* Reset the interface. */
2330 (void) wm_init(ifp);
2331 }
2332
2333 /* Try to get more packets going. */
2334 wm_start(ifp);
2335 }
2336
2337 /*
2338 * wm_ioctl: [ifnet interface function]
2339 *
2340 * Handle control requests from the operator.
2341 */
2342 static int
2343 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2344 {
2345 struct wm_softc *sc = ifp->if_softc;
2346 struct ifreq *ifr = (struct ifreq *) data;
2347 struct ifaddr *ifa = (struct ifaddr *)data;
2348 struct sockaddr_dl *sdl;
2349 int diff, s, error;
2350
2351 s = splnet();
2352
2353 switch (cmd) {
2354 case SIOCSIFFLAGS:
2355 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
2356 break;
2357 if (ifp->if_flags & IFF_UP) {
2358 diff = (ifp->if_flags ^ sc->sc_if_flags)
2359 & (IFF_PROMISC | IFF_ALLMULTI);
2360 if ((diff & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
2361 /*
				 * If the only difference between the old
				 * and new flags is IFF_PROMISC or
				 * IFF_ALLMULTI, just set the multicast
				 * filter (don't reset the chip, to avoid
				 * taking the link down).
2366 */
2367 wm_set_filter(sc);
2368 } else {
2369 /*
2370 * Reset the interface to pick up changes in
2371 * any other flags that affect the hardware
2372 * state.
2373 */
2374 wm_init(ifp);
2375 }
2376 } else {
2377 if (ifp->if_flags & IFF_RUNNING)
2378 wm_stop(ifp, 1);
2379 }
2380 sc->sc_if_flags = ifp->if_flags;
2381 error = 0;
2382 break;
2383 case SIOCSIFMEDIA:
2384 case SIOCGIFMEDIA:
2385 /* Flow control requires full-duplex mode. */
2386 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2387 (ifr->ifr_media & IFM_FDX) == 0)
2388 ifr->ifr_media &= ~IFM_ETH_FMASK;
2389 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2390 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2391 /* We can do both TXPAUSE and RXPAUSE. */
2392 ifr->ifr_media |=
2393 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2394 }
2395 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2396 }
2397 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2398 break;
2399 case SIOCINITIFADDR:
2400 if (ifa->ifa_addr->sa_family == AF_LINK) {
2401 sdl = satosdl(ifp->if_dl->ifa_addr);
2402 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2403 LLADDR(satosdl(ifa->ifa_addr)),
2404 ifp->if_addrlen);
2405 /* unicast address is first multicast entry */
2406 wm_set_filter(sc);
2407 error = 0;
2408 break;
2409 }
2410 /* Fall through for rest */
2411 default:
2412 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2413 break;
2414
2415 error = 0;
2416
2417 if (cmd == SIOCSIFCAP)
2418 error = (*ifp->if_init)(ifp);
2419 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2420 ;
2421 else if (ifp->if_flags & IFF_RUNNING) {
2422 /*
2423 * Multicast list has changed; set the hardware filter
2424 * accordingly.
2425 */
2426 wm_set_filter(sc);
2427 }
2428 break;
2429 }
2430
2431 /* Try to get more packets going. */
2432 wm_start(ifp);
2433
2434 splx(s);
2435 return (error);
2436 }
2437
2438 /*
2439 * wm_intr:
2440 *
2441 * Interrupt service routine.
2442 */
2443 static int
2444 wm_intr(void *arg)
2445 {
2446 struct wm_softc *sc = arg;
2447 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2448 uint32_t icr;
2449 int handled = 0;
2450
2451 while (1 /* CONSTCOND */) {
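		/*
		 * Note: ICR is clear-on-read on these devices, so this
		 * read also acknowledges the pending interrupt causes.
		 */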
2452 icr = CSR_READ(sc, WMREG_ICR);
2453 if ((icr & sc->sc_icr) == 0)
2454 break;
2455 #if 0 /*NRND > 0*/
2456 if (RND_ENABLED(&sc->rnd_source))
2457 rnd_add_uint32(&sc->rnd_source, icr);
2458 #endif
2459
2460 handled = 1;
2461
2462 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2463 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2464 DPRINTF(WM_DEBUG_RX,
2465 ("%s: RX: got Rx intr 0x%08x\n",
2466 device_xname(sc->sc_dev),
2467 icr & (ICR_RXDMT0|ICR_RXT0)));
2468 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2469 }
2470 #endif
2471 wm_rxintr(sc);
2472
2473 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2474 if (icr & ICR_TXDW) {
2475 DPRINTF(WM_DEBUG_TX,
2476 ("%s: TX: got TXDW interrupt\n",
2477 device_xname(sc->sc_dev)));
2478 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2479 }
2480 #endif
2481 wm_txintr(sc);
2482
2483 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2484 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2485 wm_linkintr(sc, icr);
2486 }
2487
2488 if (icr & ICR_RXO) {
2489 ifp->if_ierrors++;
2490 #if defined(WM_DEBUG)
2491 log(LOG_WARNING, "%s: Receive overrun\n",
2492 device_xname(sc->sc_dev));
2493 #endif /* defined(WM_DEBUG) */
2494 }
2495 }
2496
2497 if (handled) {
2498 /* Try to get more packets going. */
2499 wm_start(ifp);
2500 }
2501
2502 return (handled);
2503 }
2504
2505 /*
2506 * wm_txintr:
2507 *
2508 * Helper; handle transmit interrupts.
2509 */
2510 static void
2511 wm_txintr(struct wm_softc *sc)
2512 {
2513 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2514 struct wm_txsoft *txs;
2515 uint8_t status;
2516 int i;
2517
2518 ifp->if_flags &= ~IFF_OACTIVE;
2519
2520 /*
2521 * Go through the Tx list and free mbufs for those
2522 * frames which have been transmitted.
2523 */
2524 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2525 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2526 txs = &sc->sc_txsoft[i];
2527
2528 DPRINTF(WM_DEBUG_TX,
2529 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2530
2531 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2532 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2533
2534 status =
2535 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2536 if ((status & WTX_ST_DD) == 0) {
2537 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2538 BUS_DMASYNC_PREREAD);
2539 break;
2540 }
2541
2542 DPRINTF(WM_DEBUG_TX,
2543 ("%s: TX: job %d done: descs %d..%d\n",
2544 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2545 txs->txs_lastdesc));
2546
2547 /*
2548 * XXX We should probably be using the statistics
2549 * XXX registers, but I don't know if they exist
2550 * XXX on chips before the i82544.
2551 */
2552
2553 #ifdef WM_EVENT_COUNTERS
2554 if (status & WTX_ST_TU)
2555 WM_EVCNT_INCR(&sc->sc_ev_tu);
2556 #endif /* WM_EVENT_COUNTERS */
2557
2558 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2559 ifp->if_oerrors++;
2560 if (status & WTX_ST_LC)
2561 log(LOG_WARNING, "%s: late collision\n",
2562 device_xname(sc->sc_dev));
2563 else if (status & WTX_ST_EC) {
2564 ifp->if_collisions += 16;
2565 log(LOG_WARNING, "%s: excessive collisions\n",
2566 device_xname(sc->sc_dev));
2567 }
2568 } else
2569 ifp->if_opackets++;
2570
2571 sc->sc_txfree += txs->txs_ndesc;
2572 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2573 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2574 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2575 m_freem(txs->txs_mbuf);
2576 txs->txs_mbuf = NULL;
2577 }
2578
2579 /* Update the dirty transmit buffer pointer. */
2580 sc->sc_txsdirty = i;
2581 DPRINTF(WM_DEBUG_TX,
2582 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2583
2584 /*
2585 * If there are no more pending transmissions, cancel the watchdog
2586 * timer.
2587 */
2588 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2589 ifp->if_timer = 0;
2590 }
2591
2592 /*
2593 * wm_rxintr:
2594 *
2595 * Helper; handle receive interrupts.
2596 */
2597 static void
2598 wm_rxintr(struct wm_softc *sc)
2599 {
2600 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2601 struct wm_rxsoft *rxs;
2602 struct mbuf *m;
2603 int i, len;
2604 uint8_t status, errors;
2605 uint16_t vlantag;
2606
2607 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2608 rxs = &sc->sc_rxsoft[i];
2609
2610 DPRINTF(WM_DEBUG_RX,
2611 ("%s: RX: checking descriptor %d\n",
2612 device_xname(sc->sc_dev), i));
2613
2614 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2615
2616 status = sc->sc_rxdescs[i].wrx_status;
2617 errors = sc->sc_rxdescs[i].wrx_errors;
2618 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2619 vlantag = sc->sc_rxdescs[i].wrx_special;
2620
2621 if ((status & WRX_ST_DD) == 0) {
2622 /*
2623 * We have processed all of the receive descriptors.
2624 */
2625 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2626 break;
2627 }
2628
2629 if (__predict_false(sc->sc_rxdiscard)) {
2630 DPRINTF(WM_DEBUG_RX,
2631 ("%s: RX: discarding contents of descriptor %d\n",
2632 device_xname(sc->sc_dev), i));
2633 WM_INIT_RXDESC(sc, i);
2634 if (status & WRX_ST_EOP) {
2635 /* Reset our state. */
2636 DPRINTF(WM_DEBUG_RX,
2637 ("%s: RX: resetting rxdiscard -> 0\n",
2638 device_xname(sc->sc_dev)));
2639 sc->sc_rxdiscard = 0;
2640 }
2641 continue;
2642 }
2643
2644 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2645 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2646
2647 m = rxs->rxs_mbuf;
2648
2649 /*
2650 * Add a new receive buffer to the ring, unless of
2651 * course the length is zero. Treat the latter as a
2652 * failed mapping.
2653 */
2654 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2655 /*
2656 * Failed, throw away what we've done so
2657 * far, and discard the rest of the packet.
2658 */
2659 ifp->if_ierrors++;
2660 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2661 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2662 WM_INIT_RXDESC(sc, i);
2663 if ((status & WRX_ST_EOP) == 0)
2664 sc->sc_rxdiscard = 1;
2665 if (sc->sc_rxhead != NULL)
2666 m_freem(sc->sc_rxhead);
2667 WM_RXCHAIN_RESET(sc);
2668 DPRINTF(WM_DEBUG_RX,
2669 ("%s: RX: Rx buffer allocation failed, "
2670 "dropping packet%s\n", device_xname(sc->sc_dev),
2671 sc->sc_rxdiscard ? " (discard)" : ""));
2672 continue;
2673 }
2674
2675 m->m_len = len;
2676 sc->sc_rxlen += len;
2677 DPRINTF(WM_DEBUG_RX,
2678 ("%s: RX: buffer at %p len %d\n",
2679 device_xname(sc->sc_dev), m->m_data, len));
2680
2681 /*
2682 * If this is not the end of the packet, keep
2683 * looking.
2684 */
2685 if ((status & WRX_ST_EOP) == 0) {
2686 WM_RXCHAIN_LINK(sc, m);
2687 DPRINTF(WM_DEBUG_RX,
2688 ("%s: RX: not yet EOP, rxlen -> %d\n",
2689 device_xname(sc->sc_dev), sc->sc_rxlen));
2690 continue;
2691 }
2692
2693 /*
2694 * Okay, we have the entire packet now. The chip is
2695 * configured to include the FCS (not all chips can
2696 * be configured to strip it), so we need to trim it.
2697 * May need to adjust length of previous mbuf in the
2698 * chain if the current mbuf is too short.
2699 */
2700 if (m->m_len < ETHER_CRC_LEN) {
2701 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2702 m->m_len = 0;
2703 } else {
2704 m->m_len -= ETHER_CRC_LEN;
2705 }
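		/*
		 * E.g. if the last mbuf holds only 1 byte, that byte plus
		 * 3 bytes trimmed from the previous mbuf form the FCS.
		 */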
2706 len = sc->sc_rxlen - ETHER_CRC_LEN;
2707
2708 WM_RXCHAIN_LINK(sc, m);
2709
2710 *sc->sc_rxtailp = NULL;
2711 m = sc->sc_rxhead;
2712
2713 WM_RXCHAIN_RESET(sc);
2714
2715 DPRINTF(WM_DEBUG_RX,
2716 ("%s: RX: have entire packet, len -> %d\n",
2717 device_xname(sc->sc_dev), len));
2718
2719 /*
2720 * If an error occurred, update stats and drop the packet.
2721 */
2722 if (errors &
2723 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2724 ifp->if_ierrors++;
2725 if (errors & WRX_ER_SE)
2726 log(LOG_WARNING, "%s: symbol error\n",
2727 device_xname(sc->sc_dev));
2728 else if (errors & WRX_ER_SEQ)
2729 log(LOG_WARNING, "%s: receive sequence error\n",
2730 device_xname(sc->sc_dev));
2731 else if (errors & WRX_ER_CE)
2732 log(LOG_WARNING, "%s: CRC error\n",
2733 device_xname(sc->sc_dev));
2734 m_freem(m);
2735 continue;
2736 }
2737
2738 /*
2739 * No errors. Receive the packet.
2740 */
2741 m->m_pkthdr.rcvif = ifp;
2742 m->m_pkthdr.len = len;
2743
2744 /*
2745 * If VLANs are enabled, VLAN packets have been unwrapped
2746 * for us. Associate the tag with the packet.
2747 */
2748 if ((status & WRX_ST_VP) != 0) {
2749 VLAN_INPUT_TAG(ifp, m,
2750 le16toh(vlantag),
2751 continue);
2752 }
2753
2754 /*
2755 * Set up checksum info for this packet.
2756 */
2757 if ((status & WRX_ST_IXSM) == 0) {
2758 if (status & WRX_ST_IPCS) {
2759 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2760 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2761 if (errors & WRX_ER_IPE)
2762 m->m_pkthdr.csum_flags |=
2763 M_CSUM_IPv4_BAD;
2764 }
2765 if (status & WRX_ST_TCPCS) {
2766 /*
2767 * Note: we don't know if this was TCP or UDP,
2768 * so we just set both bits, and expect the
2769 * upper layers to deal.
2770 */
2771 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2772 m->m_pkthdr.csum_flags |=
2773 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2774 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2775 if (errors & WRX_ER_TCPE)
2776 m->m_pkthdr.csum_flags |=
2777 M_CSUM_TCP_UDP_BAD;
2778 }
2779 }
2780
2781 ifp->if_ipackets++;
2782
2783 #if NBPFILTER > 0
2784 /* Pass this up to any BPF listeners. */
2785 if (ifp->if_bpf)
2786 bpf_mtap(ifp->if_bpf, m);
2787 #endif /* NBPFILTER > 0 */
2788
2789 /* Pass it on. */
2790 (*ifp->if_input)(ifp, m);
2791 }
2792
2793 /* Update the receive pointer. */
2794 sc->sc_rxptr = i;
2795
2796 DPRINTF(WM_DEBUG_RX,
2797 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2798 }
2799
2800 /*
2801 * wm_linkintr:
2802 *
2803 * Helper; handle link interrupts.
2804 */
2805 static void
2806 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2807 {
2808 uint32_t status;
2809
2810 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2811 __func__));
2812 /*
2813 * If we get a link status interrupt on a 1000BASE-T
2814 * device, just fall into the normal MII tick path.
2815 */
2816 if (sc->sc_flags & WM_F_HAS_MII) {
2817 if (icr & ICR_LSC) {
2818 DPRINTF(WM_DEBUG_LINK,
2819 ("%s: LINK: LSC -> mii_tick\n",
2820 device_xname(sc->sc_dev)));
2821 mii_tick(&sc->sc_mii);
2822 if (sc->sc_type == WM_T_82543) {
2823 int miistatus, active;
2824
2825 /*
2826 * With 82543, we need to force speed and
2827 * duplex on the MAC equal to what the PHY
2828 * speed and duplex configuration is.
2829 */
2830 miistatus = sc->sc_mii.mii_media_status;
2831
2832 if (miistatus & IFM_ACTIVE) {
2833 active = sc->sc_mii.mii_media_active;
2834 sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2835 | CTRL_FD);
2836 switch (IFM_SUBTYPE(active)) {
2837 case IFM_10_T:
2838 sc->sc_ctrl |= CTRL_SPEED_10;
2839 break;
2840 case IFM_100_TX:
2841 sc->sc_ctrl |= CTRL_SPEED_100;
2842 break;
2843 case IFM_1000_T:
2844 sc->sc_ctrl |= CTRL_SPEED_1000;
2845 break;
2846 default:
2847 /*
						 * Fiber?
						 * Should never get here.
2850 */
2851 printf("unknown media (%x)\n",
2852 active);
2853 break;
2854 }
2855 if (active & IFM_FDX)
2856 sc->sc_ctrl |= CTRL_FD;
2857 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2858 }
2859 }
2860 } else if (icr & ICR_RXSEQ) {
2861 DPRINTF(WM_DEBUG_LINK,
2862 ("%s: LINK Receive sequence error\n",
2863 device_xname(sc->sc_dev)));
2864 }
2865 return;
2866 }
2867
2868 status = CSR_READ(sc, WMREG_STATUS);
2869 if (icr & ICR_LSC) {
2870 if (status & STATUS_LU) {
2871 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2872 device_xname(sc->sc_dev),
2873 (status & STATUS_FD) ? "FDX" : "HDX"));
2874 /*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we must re-read sc->sc_ctrl here.
2877 */
2878
2879 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2880 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2881 sc->sc_fcrtl &= ~FCRTL_XONE;
2882 if (status & STATUS_FD)
2883 sc->sc_tctl |=
2884 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2885 else
2886 sc->sc_tctl |=
2887 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2888 if (sc->sc_ctrl & CTRL_TFCE)
2889 sc->sc_fcrtl |= FCRTL_XONE;
2890 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2891 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2892 WMREG_OLD_FCRTL : WMREG_FCRTL,
2893 sc->sc_fcrtl);
2894 sc->sc_tbi_linkup = 1;
2895 } else {
2896 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2897 device_xname(sc->sc_dev)));
2898 sc->sc_tbi_linkup = 0;
2899 }
2900 wm_tbi_set_linkled(sc);
2901 } else if (icr & ICR_RXCFG) {
2902 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2903 device_xname(sc->sc_dev)));
2904 sc->sc_tbi_nrxcfg++;
2905 wm_check_for_link(sc);
2906 } else if (icr & ICR_RXSEQ) {
2907 DPRINTF(WM_DEBUG_LINK,
2908 ("%s: LINK: Receive sequence error\n",
2909 device_xname(sc->sc_dev)));
2910 }
2911 }
2912
2913 /*
2914 * wm_tick:
2915 *
2916 * One second timer, used to check link status, sweep up
2917 * completed transmit jobs, etc.
2918 */
2919 static void
2920 wm_tick(void *arg)
2921 {
2922 struct wm_softc *sc = arg;
2923 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2924 int s;
2925
2926 s = splnet();
2927
2928 if (sc->sc_type >= WM_T_82542_2_1) {
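		/* These flow-control statistics registers clear on read. */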
2929 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2930 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2931 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2932 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2933 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2934 }
2935
2936 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2937 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2940 if (sc->sc_flags & WM_F_HAS_MII)
2941 mii_tick(&sc->sc_mii);
2942 else
2943 wm_tbi_check_link(sc);
2944
2945 splx(s);
2946
2947 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2948 }
2949
2950 /*
2951 * wm_reset:
2952 *
2953 * Reset the i82542 chip.
2954 */
2955 static void
2956 wm_reset(struct wm_softc *sc)
2957 {
2958 uint32_t reg;
2959
2960 /*
2961 * Allocate on-chip memory according to the MTU size.
2962 * The Packet Buffer Allocation register must be written
2963 * before the chip is reset.
2964 */
2965 switch (sc->sc_type) {
2966 case WM_T_82547:
2967 case WM_T_82547_2:
2968 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2969 PBA_22K : PBA_30K;
2970 sc->sc_txfifo_head = 0;
2971 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2972 sc->sc_txfifo_size =
2973 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2974 sc->sc_txfifo_stall = 0;
2975 break;
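	/*
	 * For illustration, assuming the PBA_* constants are in units
	 * of kilobytes: PBA_30K gives receive 30KB of packet buffer
	 * and leaves 40KB - 30KB = 10KB for the 82547 Tx FIFO.
	 */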
2976 case WM_T_82571:
2977 case WM_T_82572:
2978 case WM_T_80003:
2979 sc->sc_pba = PBA_32K;
2980 break;
2981 case WM_T_82573:
2982 case WM_T_82574:
2983 sc->sc_pba = PBA_12K;
2984 break;
2985 case WM_T_ICH8:
2986 sc->sc_pba = PBA_8K;
2987 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2988 break;
2989 case WM_T_ICH9:
2990 case WM_T_ICH10:
2991 sc->sc_pba = PBA_10K;
2992 break;
2993 default:
2994 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2995 PBA_40K : PBA_48K;
2996 break;
2997 }
2998 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2999
3000 if (sc->sc_flags & WM_F_PCIE) {
3001 int timeout = 800;
3002
3003 sc->sc_ctrl |= CTRL_GIO_M_DIS;
3004 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
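		/* Poll for up to 800 * 100us = 80ms for the bit to clear. */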
3005
		while (timeout--) {
3007 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3008 break;
3009 delay(100);
3010 }
3011 }
3012
3013 /* clear interrupt */
3014 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3015
3016 /*
3017 * 82541 Errata 29? & 82547 Errata 28?
3018 * See also the description about PHY_RST bit in CTRL register
3019 * in 8254x_GBe_SDM.pdf.
3020 */
3021 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3022 CSR_WRITE(sc, WMREG_CTRL,
3023 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3024 delay(5000);
3025 }
3026
3027 switch (sc->sc_type) {
3028 case WM_T_82544:
3029 case WM_T_82540:
3030 case WM_T_82545:
3031 case WM_T_82546:
3032 case WM_T_82541:
3033 case WM_T_82541_2:
3034 /*
3035 * On some chipsets, a reset through a memory-mapped write
3036 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes a major headache that can be
3038 * avoided by issuing the reset via indirect register writes
3039 * through I/O space.
3040 *
3041 * So, if we successfully mapped the I/O BAR at attach time,
3042 * use that. Otherwise, try our luck with a memory-mapped
3043 * reset.
3044 */
3045 if (sc->sc_flags & WM_F_IOH_VALID)
3046 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3047 else
3048 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3049 break;
3050
3051 case WM_T_82545_3:
3052 case WM_T_82546_3:
3053 /* Use the shadow control register on these chips. */
3054 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3055 break;
3056
3057 case WM_T_ICH8:
3058 case WM_T_ICH9:
3059 case WM_T_ICH10:
3060 wm_get_swfwhw_semaphore(sc);
3061 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
		delay(10000);
		break;
3063
3064 default:
3065 /* Everything else can safely use the documented method. */
3066 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3067 break;
3068 }
3069 delay(10000);
3070
3071 /* reload EEPROM */
	switch (sc->sc_type) {
3073 case WM_T_82542_2_0:
3074 case WM_T_82542_2_1:
3075 case WM_T_82543:
3076 case WM_T_82544:
3077 delay(10);
3078 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3079 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3080 delay(2000);
3081 break;
3082 case WM_T_82541:
3083 case WM_T_82541_2:
3084 case WM_T_82547:
3085 case WM_T_82547_2:
3086 delay(20000);
3087 break;
3088 case WM_T_82573:
3089 case WM_T_82574:
3090 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3091 delay(10);
3092 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3093 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3094 }
3095 /* FALLTHROUGH */
3096 default:
3097 /* check EECD_EE_AUTORD */
3098 wm_get_auto_rd_done(sc);
3099 }
3100
3101 /* reload sc_ctrl */
3102 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3103
3104 #if 0
3105 for (i = 0; i < 1000; i++) {
3106 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3107 return;
3108 }
3109 delay(20);
3110 }
3111
3112 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3113 log(LOG_ERR, "%s: reset failed to complete\n",
3114 device_xname(sc->sc_dev));
3115 #endif
3116 }
3117
3118 /*
3119 * wm_init: [ifnet interface function]
3120 *
3121 * Initialize the interface. Must be called at splnet().
3122 */
3123 static int
3124 wm_init(struct ifnet *ifp)
3125 {
3126 struct wm_softc *sc = ifp->if_softc;
3127 struct wm_rxsoft *rxs;
3128 int i, error = 0;
3129 uint32_t reg;
3130
3131 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, at normal MTU,
3135 * on such platforms. One possibility is that the DMA itself is
3136 * slightly more efficient if the front of the entire packet (instead
3137 * of the front of the headers) is aligned.
3138 *
3139 * Note we must always set align_tweak to 0 if we are using
3140 * jumbo frames.
3141 */
3142 #ifdef __NO_STRICT_ALIGNMENT
3143 sc->sc_align_tweak = 0;
3144 #else
3145 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3146 sc->sc_align_tweak = 0;
3147 else
3148 sc->sc_align_tweak = 2;
3149 #endif /* __NO_STRICT_ALIGNMENT */
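	/*
	 * E.g. at the standard MTU, 1500 + 14 + 4 = 1518 <= MCLBYTES - 2,
	 * so align_tweak is 2; shifting the 14-byte Ethernet header by
	 * two bytes leaves the IP header 4-byte aligned.
	 */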
3150
3151 /* Cancel any pending I/O. */
3152 wm_stop(ifp, 0);
3153
3154 /* update statistics before reset */
3155 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3156 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3157
3158 /* Reset the chip to a known state. */
3159 wm_reset(sc);
3160
3161 switch (sc->sc_type) {
3162 case WM_T_82571:
3163 case WM_T_82572:
3164 case WM_T_82573:
3165 case WM_T_82574:
3166 case WM_T_80003:
3167 case WM_T_ICH8:
3168 case WM_T_ICH9:
3169 case WM_T_ICH10:
3170 if (wm_check_mng_mode(sc) != 0)
3171 wm_get_hw_control(sc);
3172 break;
3173 default:
3174 break;
3175 }
3176
3177 /* Initialize the transmit descriptor ring. */
3178 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3179 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3180 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3181 sc->sc_txfree = WM_NTXDESC(sc);
3182 sc->sc_txnext = 0;
3183
3184 if (sc->sc_type < WM_T_82543) {
3185 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3186 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3187 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3188 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3189 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3190 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3191 } else {
3192 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3193 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3194 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3195 CSR_WRITE(sc, WMREG_TDH, 0);
3196 CSR_WRITE(sc, WMREG_TDT, 0);
3197 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3198 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3199
3200 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3201 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3202 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3203 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3204 }
3205 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3206 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3207
3208 /* Initialize the transmit job descriptors. */
3209 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3210 sc->sc_txsoft[i].txs_mbuf = NULL;
3211 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3212 sc->sc_txsnext = 0;
3213 sc->sc_txsdirty = 0;
3214
3215 /*
3216 * Initialize the receive descriptor and receive job
3217 * descriptor rings.
3218 */
3219 if (sc->sc_type < WM_T_82543) {
3220 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3221 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3222 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3223 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3224 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3225 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3226
3227 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3228 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3229 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3230 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3231 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3232 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3233 } else {
3234 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3235 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3236 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3237 CSR_WRITE(sc, WMREG_RDH, 0);
3238 CSR_WRITE(sc, WMREG_RDT, 0);
3239 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3240 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3241 }
3242 for (i = 0; i < WM_NRXDESC; i++) {
3243 rxs = &sc->sc_rxsoft[i];
3244 if (rxs->rxs_mbuf == NULL) {
3245 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3246 log(LOG_ERR, "%s: unable to allocate or map rx "
3247 "buffer %d, error = %d\n",
3248 device_xname(sc->sc_dev), i, error);
3249 /*
3250 * XXX Should attempt to run with fewer receive
3251 * XXX buffers instead of just failing.
3252 */
3253 wm_rxdrain(sc);
3254 goto out;
3255 }
3256 } else
3257 WM_INIT_RXDESC(sc, i);
3258 }
3259 sc->sc_rxptr = 0;
3260 sc->sc_rxdiscard = 0;
3261 WM_RXCHAIN_RESET(sc);
3262
3263 /*
3264 * Clear out the VLAN table -- we don't use it (yet).
3265 */
3266 CSR_WRITE(sc, WMREG_VET, 0);
3267 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3268 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3269
3270 /*
3271 * Set up flow-control parameters.
3272 *
3273 * XXX Values could probably stand some tuning.
3274 */
3275 if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
3276 && (sc->sc_type != WM_T_ICH10)) {
3277 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3278 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3279 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3280 }
3281
3282 sc->sc_fcrtl = FCRTL_DFLT;
3283 if (sc->sc_type < WM_T_82543) {
3284 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3285 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3286 } else {
3287 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3288 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3289 }
3290
3291 if (sc->sc_type == WM_T_80003)
3292 CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
3293 else
3294 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3295
3296 /* Deal with VLAN enables. */
3297 if (VLAN_ATTACHED(&sc->sc_ethercom))
3298 sc->sc_ctrl |= CTRL_VME;
3299 else
3300 sc->sc_ctrl &= ~CTRL_VME;
3301
3302 /* Write the control registers. */
3303 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3304
3305 if (sc->sc_flags & WM_F_HAS_MII) {
3306 int val;
3307
3308 switch (sc->sc_type) {
3309 case WM_T_80003:
3310 case WM_T_ICH8:
3311 case WM_T_ICH9:
3312 case WM_T_ICH10:
3313 /*
			 * Set the MAC to wait the maximum time between each
3315 * iteration and increase the max iterations when
3316 * polling the phy; this fixes erroneous timeouts at
3317 * 10Mbps.
3318 */
3319 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
3320 0xFFFF);
3321 val = wm_kmrn_readreg(sc,
3322 KUMCTRLSTA_OFFSET_INB_PARAM);
3323 val |= 0x3F;
3324 wm_kmrn_writereg(sc,
3325 KUMCTRLSTA_OFFSET_INB_PARAM, val);
3326 break;
3327 default:
3328 break;
3329 }
3330
3331 if (sc->sc_type == WM_T_80003) {
3332 val = CSR_READ(sc, WMREG_CTRL_EXT);
3333 val &= ~CTRL_EXT_LINK_MODE_MASK;
3334 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3335
3336 /* Bypass RX and TX FIFO's */
3337 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3338 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3339 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3340
3341 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3342 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3343 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3344 }
3345 }
3346 #if 0
3347 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3348 #endif
3349
3350 /*
3351 * Set up checksum offload parameters.
3352 */
3353 reg = CSR_READ(sc, WMREG_RXCSUM);
3354 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3355 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3356 reg |= RXCSUM_IPOFL;
3357 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3358 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3359 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3360 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3361 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3362
3363 /* Reset TBI's RXCFG count */
3364 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3365
3366 /*
3367 * Set up the interrupt registers.
3368 */
3369 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3370 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3371 ICR_RXO | ICR_RXT0;
3372 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3373 sc->sc_icr |= ICR_RXCFG;
3374 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3375
3376 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3377 || (sc->sc_type == WM_T_ICH10)) {
3378 reg = CSR_READ(sc, WMREG_KABGTXD);
3379 reg |= KABGTXD_BGSQLBIAS;
3380 CSR_WRITE(sc, WMREG_KABGTXD, reg);
3381 }
3382
3383 /* Set up the inter-packet gap. */
3384 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3385
3386 if (sc->sc_type >= WM_T_82543) {
3387 /*
3388 * Set up the interrupt throttling register (units of 256ns)
3389 * Note that a footnote in Intel's documentation says this
3390 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3391 * or 10Mbit mode. Empirically, it appears to be the case
3392 * that that is also true for the 1024ns units of the other
3393 * interrupt-related timer registers -- so, really, we ought
3394 * to divide this value by 4 when the link speed is low.
3395 *
3396 * XXX implement this division at link speed change!
3397 */
3398
3399 /*
3400 * For N interrupts/sec, set this value to:
3401 * 1000000000 / (N * 256). Note that we set the
3402 * absolute and packet timer values to this value
3403 * divided by 4 to get "simple timer" behavior.
3404 */
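		/*
		 * Check: 1000000000 / (1500 * 256) is ~2604 interrupts/sec,
		 * matching the comment below; the related timers (TIDV,
		 * TADV, RDTR, RADV) were set to 1500 / 4 = 375 above.
		 */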
3405
3406 sc->sc_itr = 1500; /* 2604 ints/sec */
3407 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3408 }
3409
3410 /* Set the VLAN ethernetype. */
3411 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3412
3413 /*
3414 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
3416 * we resolve the media type.
3417 */
3418 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
3419 | TCTL_CT(TX_COLLISION_THRESHOLD)
3420 | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3421 if (sc->sc_type >= WM_T_82571)
3422 sc->sc_tctl |= TCTL_MULR;
3423 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3424
3425 if (sc->sc_type == WM_T_80003) {
3426 reg = CSR_READ(sc, WMREG_TCTL_EXT);
3427 reg &= ~TCTL_EXT_GCEX_MASK;
3428 reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
3429 CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
3430 }
3431
3432 /* Set the media. */
3433 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3434 goto out;
3435
3436 /*
3437 * Set up the receive control register; we actually program
3438 * the register when we set the receive filter. Use multicast
3439 * address offset type 0.
3440 *
3441 * Only the i82544 has the ability to strip the incoming
3442 * CRC, so we don't enable that feature.
3443 */
3444 sc->sc_mchash_type = 0;
3445 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3446 | RCTL_MO(sc->sc_mchash_type);
3447
	/* The 82573, 82574 and ICH8 don't support jumbo frames */
3449 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3450 sc->sc_type != WM_T_ICH8)
3451 sc->sc_rctl |= RCTL_LPE;
3452
3453 if (MCLBYTES == 2048) {
3454 sc->sc_rctl |= RCTL_2k;
3455 } else {
3456 if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
3458 case 4096:
3459 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3460 break;
3461 case 8192:
3462 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3463 break;
3464 case 16384:
3465 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3466 break;
3467 default:
3468 panic("wm_init: MCLBYTES %d unsupported",
3469 MCLBYTES);
3470 break;
3471 }
3472 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3473 }
3474
3475 /* Set the receive filter. */
3476 wm_set_filter(sc);
3477
3478 /* Start the one second link check clock. */
3479 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3480
3481 /* ...all done! */
3482 ifp->if_flags |= IFF_RUNNING;
3483 ifp->if_flags &= ~IFF_OACTIVE;
3484
3485 out:
3486 if (error)
3487 log(LOG_ERR, "%s: interface not running\n",
3488 device_xname(sc->sc_dev));
3489 return (error);
3490 }
3491
3492 /*
3493 * wm_rxdrain:
3494 *
3495 * Drain the receive queue.
3496 */
3497 static void
3498 wm_rxdrain(struct wm_softc *sc)
3499 {
3500 struct wm_rxsoft *rxs;
3501 int i;
3502
3503 for (i = 0; i < WM_NRXDESC; i++) {
3504 rxs = &sc->sc_rxsoft[i];
3505 if (rxs->rxs_mbuf != NULL) {
3506 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3507 m_freem(rxs->rxs_mbuf);
3508 rxs->rxs_mbuf = NULL;
3509 }
3510 }
3511 }
3512
3513 /*
3514 * wm_stop: [ifnet interface function]
3515 *
3516 * Stop transmission on the interface.
3517 */
3518 static void
3519 wm_stop(struct ifnet *ifp, int disable)
3520 {
3521 struct wm_softc *sc = ifp->if_softc;
3522 struct wm_txsoft *txs;
3523 int i;
3524
3525 /* Stop the one second clock. */
3526 callout_stop(&sc->sc_tick_ch);
3527
3528 /* Stop the 82547 Tx FIFO stall check timer. */
3529 if (sc->sc_type == WM_T_82547)
3530 callout_stop(&sc->sc_txfifo_ch);
3531
3532 if (sc->sc_flags & WM_F_HAS_MII) {
3533 /* Down the MII. */
3534 mii_down(&sc->sc_mii);
3535 } else {
3536 #if 0
3537 /* Should we clear PHY's status properly? */
3538 wm_reset(sc);
3539 #endif
3540 }
3541
3542 /* Stop the transmit and receive processes. */
3543 CSR_WRITE(sc, WMREG_TCTL, 0);
3544 CSR_WRITE(sc, WMREG_RCTL, 0);
3545
3546 /*
3547 * Clear the interrupt mask to ensure the device cannot assert its
3548 * interrupt line.
3549 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3550 * any currently pending or shared interrupt.
3551 */
3552 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3553 sc->sc_icr = 0;
3554
3555 /* Release any queued transmit buffers. */
3556 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3557 txs = &sc->sc_txsoft[i];
3558 if (txs->txs_mbuf != NULL) {
3559 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3560 m_freem(txs->txs_mbuf);
3561 txs->txs_mbuf = NULL;
3562 }
3563 }
3564
3565 /* Mark the interface as down and cancel the watchdog timer. */
3566 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3567 ifp->if_timer = 0;
3568
3569 if (disable)
3570 wm_rxdrain(sc);
3571 }
3572
3573 void
3574 wm_get_auto_rd_done(struct wm_softc *sc)
3575 {
3576 int i;
3577
3578 /* wait for eeprom to reload */
3579 switch (sc->sc_type) {
3580 case WM_T_82571:
3581 case WM_T_82572:
3582 case WM_T_82573:
3583 case WM_T_82574:
3584 case WM_T_80003:
3585 case WM_T_ICH8:
3586 case WM_T_ICH9:
3587 case WM_T_ICH10:
3588 for (i = 10; i > 0; i--) {
3589 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3590 break;
3591 delay(1000);
3592 }
3593 if (i == 0) {
3594 log(LOG_ERR, "%s: auto read from eeprom failed to "
3595 "complete\n", device_xname(sc->sc_dev));
3596 }
3597 break;
3598 default:
3599 delay(5000);
3600 break;
3601 }
3602
	/* PHY configuration starts after EECD_EE_AUTORD is set */
3604 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3605 delay(25000);
3606 }
3607
3608 /*
3609 * wm_acquire_eeprom:
3610 *
3611 * Perform the EEPROM handshake required on some chips.
3612 */
3613 static int
3614 wm_acquire_eeprom(struct wm_softc *sc)
3615 {
3616 uint32_t reg;
3617 int x;
3618 int ret = 0;
3619
	/* Flash-type EEPROMs need no handshake; always succeed. */
3621 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3622 return 0;
3623
3624 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3625 ret = wm_get_swfwhw_semaphore(sc);
3626 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3627 /* this will also do wm_get_swsm_semaphore() if needed */
3628 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3629 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3630 ret = wm_get_swsm_semaphore(sc);
3631 }
3632
3633 if (ret) {
3634 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3635 __func__);
3636 return 1;
3637 }
3638
3639 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3640 reg = CSR_READ(sc, WMREG_EECD);
3641
3642 /* Request EEPROM access. */
3643 reg |= EECD_EE_REQ;
3644 CSR_WRITE(sc, WMREG_EECD, reg);
3645
3646 /* ..and wait for it to be granted. */
3647 for (x = 0; x < 1000; x++) {
3648 reg = CSR_READ(sc, WMREG_EECD);
3649 if (reg & EECD_EE_GNT)
3650 break;
3651 delay(5);
3652 }
3653 if ((reg & EECD_EE_GNT) == 0) {
3654 aprint_error_dev(sc->sc_dev,
3655 "could not acquire EEPROM GNT\n");
3656 reg &= ~EECD_EE_REQ;
3657 CSR_WRITE(sc, WMREG_EECD, reg);
			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
				wm_put_swfwhw_semaphore(sc);
			else if (sc->sc_flags & WM_F_SWFW_SYNC)
				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
3664 return (1);
3665 }
3666 }
3667
3668 return (0);
3669 }
3670
3671 /*
3672 * wm_release_eeprom:
3673 *
3674 * Release the EEPROM mutex.
3675 */
3676 static void
3677 wm_release_eeprom(struct wm_softc *sc)
3678 {
3679 uint32_t reg;
3680
	/* Nothing to release for flash-type EEPROMs. */
3682 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3683 return;
3684
3685 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3686 reg = CSR_READ(sc, WMREG_EECD);
3687 reg &= ~EECD_EE_REQ;
3688 CSR_WRITE(sc, WMREG_EECD, reg);
3689 }
3690
	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
		wm_put_swfwhw_semaphore(sc);
	else if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
3697 }
3698
3699 /*
3700 * wm_eeprom_sendbits:
3701 *
3702 * Send a series of bits to the EEPROM.
3703 */
3704 static void
3705 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3706 {
3707 uint32_t reg;
3708 int x;
3709
3710 reg = CSR_READ(sc, WMREG_EECD);
3711
3712 for (x = nbits; x > 0; x--) {
3713 if (bits & (1U << (x - 1)))
3714 reg |= EECD_DI;
3715 else
3716 reg &= ~EECD_DI;
3717 CSR_WRITE(sc, WMREG_EECD, reg);
3718 delay(2);
3719 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3720 delay(2);
3721 CSR_WRITE(sc, WMREG_EECD, reg);
3722 delay(2);
3723 }
3724 }
3725
3726 /*
3727 * wm_eeprom_recvbits:
3728 *
3729 * Receive a series of bits from the EEPROM.
3730 */
3731 static void
3732 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3733 {
3734 uint32_t reg, val;
3735 int x;
3736
3737 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3738
3739 val = 0;
3740 for (x = nbits; x > 0; x--) {
3741 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3742 delay(2);
3743 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3744 val |= (1U << (x - 1));
3745 CSR_WRITE(sc, WMREG_EECD, reg);
3746 delay(2);
3747 }
3748 *valp = val;
3749 }
3750
3751 /*
3752 * wm_read_eeprom_uwire:
3753 *
 * Read word(s) from the EEPROM using the MicroWire protocol.
3755 */
3756 static int
3757 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3758 {
3759 uint32_t reg, val;
3760 int i;
3761
3762 for (i = 0; i < wordcnt; i++) {
3763 /* Clear SK and DI. */
3764 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3765 CSR_WRITE(sc, WMREG_EECD, reg);
3766
3767 /* Set CHIP SELECT. */
3768 reg |= EECD_CS;
3769 CSR_WRITE(sc, WMREG_EECD, reg);
3770 delay(2);
3771
3772 /* Shift in the READ command. */
3773 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3774
3775 /* Shift in address. */
3776 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3777
3778 /* Shift out the data. */
3779 wm_eeprom_recvbits(sc, &val, 16);
3780 data[i] = val & 0xffff;
3781
3782 /* Clear CHIP SELECT. */
3783 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3784 CSR_WRITE(sc, WMREG_EECD, reg);
3785 delay(2);
3786 }
3787
3788 return (0);
3789 }
3790
3791 /*
3792 * wm_spi_eeprom_ready:
3793 *
3794 * Wait for a SPI EEPROM to be ready for commands.
3795 */
3796 static int
3797 wm_spi_eeprom_ready(struct wm_softc *sc)
3798 {
3799 uint32_t val;
3800 int usec;
3801
3802 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3803 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3804 wm_eeprom_recvbits(sc, &val, 8);
3805 if ((val & SPI_SR_RDY) == 0)
3806 break;
3807 }
3808 if (usec >= SPI_MAX_RETRIES) {
3809 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3810 return (1);
3811 }
3812 return (0);
3813 }
3814
3815 /*
3816 * wm_read_eeprom_spi:
3817 *
 * Read word(s) from the EEPROM using the SPI protocol.
3819 */
3820 static int
3821 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3822 {
3823 uint32_t reg, val;
3824 int i;
3825 uint8_t opc;
3826
3827 /* Clear SK and CS. */
3828 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3829 CSR_WRITE(sc, WMREG_EECD, reg);
3830 delay(2);
3831
3832 if (wm_spi_eeprom_ready(sc))
3833 return (1);
3834
3835 /* Toggle CS to flush commands. */
3836 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3837 delay(2);
3838 CSR_WRITE(sc, WMREG_EECD, reg);
3839 delay(2);
3840
3841 opc = SPI_OPC_READ;
3842 if (sc->sc_ee_addrbits == 8 && word >= 128)
3843 opc |= SPI_OPC_A8;
3844
3845 wm_eeprom_sendbits(sc, opc, 8);
3846 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3847
3848 for (i = 0; i < wordcnt; i++) {
3849 wm_eeprom_recvbits(sc, &val, 16);
3850 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3851 }
3852
3853 /* Raise CS and clear SK. */
3854 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3855 CSR_WRITE(sc, WMREG_EECD, reg);
3856 delay(2);
3857
3858 return (0);
3859 }
3860
3861 #define EEPROM_CHECKSUM 0xBABA
3862 #define EEPROM_SIZE 0x0040
3863
3864 /*
 * wm_validate_eeprom_checksum:
 *
 * Validate the EEPROM checksum; the sum of the first 64 (16-bit)
 * words must equal EEPROM_CHECKSUM (0xBABA).
3868 */
3869 static int
3870 wm_validate_eeprom_checksum(struct wm_softc *sc)
3871 {
3872 uint16_t checksum;
3873 uint16_t eeprom_data;
3874 int i;
3875
3876 checksum = 0;
3877
3878 for (i = 0; i < EEPROM_SIZE; i++) {
3879 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3880 return 1;
3881 checksum += eeprom_data;
3882 }
3883
3884 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3885 return 1;
3886
3887 return 0;
3888 }
3889
3890 /*
3891 * wm_read_eeprom:
3892 *
3893 * Read data from the serial EEPROM.
3894 */
3895 static int
3896 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3897 {
3898 int rv;
3899
3900 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3901 return 1;
3902
3903 if (wm_acquire_eeprom(sc))
3904 return 1;
3905
3906 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3907 || (sc->sc_type == WM_T_ICH10))
3908 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3909 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3910 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3911 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3912 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3913 else
3914 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3915
3916 wm_release_eeprom(sc);
3917 return rv;
3918 }
3919
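/*
 * wm_read_eeprom_eerd:
 *
 *	Read data from the EEPROM using the EERD register.
 */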
3920 static int
3921 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3922 uint16_t *data)
3923 {
3924 int i, eerd = 0;
3925 int error = 0;
3926
3927 for (i = 0; i < wordcnt; i++) {
3928 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3929
3930 CSR_WRITE(sc, WMREG_EERD, eerd);
3931 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3932 if (error != 0)
3933 break;
3934
3935 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3936 }
3937
3938 return error;
3939 }
3940
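/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the given EERD/EEWR register until the DONE bit is set;
 *	returns 0 on success, -1 on timeout.
 */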
3941 static int
3942 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3943 {
3944 uint32_t attempts = 100000;
3945 uint32_t i, reg = 0;
3946 int32_t done = -1;
3947
3948 for (i = 0; i < attempts; i++) {
3949 reg = CSR_READ(sc, rw);
3950
3951 if (reg & EERD_DONE) {
3952 done = 0;
3953 break;
3954 }
3955 delay(5);
3956 }
3957
3958 return done;
3959 }
3960
3961 /*
3962 * wm_add_rxbuf:
3963 *
 * Add a receive buffer to the indicated descriptor.
3965 */
3966 static int
3967 wm_add_rxbuf(struct wm_softc *sc, int idx)
3968 {
3969 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3970 struct mbuf *m;
3971 int error;
3972
3973 MGETHDR(m, M_DONTWAIT, MT_DATA);
3974 if (m == NULL)
3975 return (ENOBUFS);
3976
3977 MCLGET(m, M_DONTWAIT);
3978 if ((m->m_flags & M_EXT) == 0) {
3979 m_freem(m);
3980 return (ENOBUFS);
3981 }
3982
3983 if (rxs->rxs_mbuf != NULL)
3984 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3985
3986 rxs->rxs_mbuf = m;
3987
3988 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3989 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3990 BUS_DMA_READ|BUS_DMA_NOWAIT);
3991 if (error) {
3992 /* XXX XXX XXX */
3993 aprint_error_dev(sc->sc_dev,
3994 "unable to load rx DMA map %d, error = %d\n",
3995 idx, error);
3996 panic("wm_add_rxbuf");
3997 }
3998
3999 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4000 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4001
4002 WM_INIT_RXDESC(sc, idx);
4003
4004 return (0);
4005 }
4006
4007 /*
4008 * wm_set_ral:
4009 *
 * Set an entry in the receive address list.
4011 */
4012 static void
4013 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
4014 {
4015 uint32_t ral_lo, ral_hi;
4016
4017 if (enaddr != NULL) {
4018 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
4019 (enaddr[3] << 24);
4020 ral_hi = enaddr[4] | (enaddr[5] << 8);
4021 ral_hi |= RAL_AV;
4022 } else {
4023 ral_lo = 0;
4024 ral_hi = 0;
4025 }
4026
4027 if (sc->sc_type >= WM_T_82544) {
4028 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
4029 ral_lo);
4030 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
4031 ral_hi);
4032 } else {
4033 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
4034 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
4035 }
4036 }
4037
4038 /*
4039 * wm_mchash:
4040 *
 * Compute the hash of the multicast address for the 4096-bit
 * multicast filter (1024-bit on the ICH8/ICH9/ICH10).
4043 */
4044 static uint32_t
4045 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4046 {
4047 static const int lo_shift[4] = { 4, 3, 2, 0 };
4048 static const int hi_shift[4] = { 4, 5, 6, 8 };
4049 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4050 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4051 uint32_t hash;
4052
4053 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4054 || (sc->sc_type == WM_T_ICH10)) {
4055 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4056 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4057 return (hash & 0x3ff);
4058 }
4059 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4060 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4061
4062 return (hash & 0xfff);
4063 }
4064
4065 /*
4066 * wm_set_filter:
4067 *
4068 * Set up the receive filter.
4069 */
4070 static void
4071 wm_set_filter(struct wm_softc *sc)
4072 {
4073 struct ethercom *ec = &sc->sc_ethercom;
4074 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4075 struct ether_multi *enm;
4076 struct ether_multistep step;
4077 bus_addr_t mta_reg;
4078 uint32_t hash, reg, bit;
4079 int i, size;
4080
4081 if (sc->sc_type >= WM_T_82544)
4082 mta_reg = WMREG_CORDOVA_MTA;
4083 else
4084 mta_reg = WMREG_MTA;
4085
4086 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4087
4088 if (ifp->if_flags & IFF_BROADCAST)
4089 sc->sc_rctl |= RCTL_BAM;
4090 if (ifp->if_flags & IFF_PROMISC) {
4091 sc->sc_rctl |= RCTL_UPE;
4092 goto allmulti;
4093 }
4094
4095 /*
4096 * Set the station address in the first RAL slot, and
4097 * clear the remaining slots.
4098 */
4099 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4100 || (sc->sc_type == WM_T_ICH10))
4101 size = WM_ICH8_RAL_TABSIZE;
4102 else
4103 size = WM_RAL_TABSIZE;
4104 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4105 for (i = 1; i < size; i++)
4106 wm_set_ral(sc, NULL, i);
4107
4108 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4109 || (sc->sc_type == WM_T_ICH10))
4110 size = WM_ICH8_MC_TABSIZE;
4111 else
4112 size = WM_MC_TABSIZE;
4113 /* Clear out the multicast table. */
4114 for (i = 0; i < size; i++)
4115 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4116
4117 ETHER_FIRST_MULTI(step, ec, enm);
4118 while (enm != NULL) {
4119 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4120 /*
4121 * We must listen to a range of multicast addresses.
4122 * For now, just accept all multicasts, rather than
4123 * trying to set only those filter bits needed to match
4124 * the range. (At this time, the only use of address
4125 * ranges is for IP multicast routing, for which the
4126 * range is big enough to require all bits set.)
4127 */
4128 goto allmulti;
4129 }
4130
4131 hash = wm_mchash(sc, enm->enm_addrlo);
4132
4133 reg = (hash >> 5);
4134 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4135 || (sc->sc_type == WM_T_ICH10))
4136 reg &= 0x1f;
4137 else
4138 reg &= 0x7f;
4139 bit = hash & 0x1f;
4140
4141 hash = CSR_READ(sc, mta_reg + (reg << 2));
4142 hash |= 1U << bit;
4143
		/* XXX 82544 erratum: odd MTA registers must be written in pairs */
		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
4146 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4147 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4148 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4149 } else
4150 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4151
4152 ETHER_NEXT_MULTI(step, enm);
4153 }
4154
4155 ifp->if_flags &= ~IFF_ALLMULTI;
4156 goto setit;
4157
4158 allmulti:
4159 ifp->if_flags |= IFF_ALLMULTI;
4160 sc->sc_rctl |= RCTL_MPE;
4161
4162 setit:
4163 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4164 }
4165
4166 /*
4167 * wm_tbi_mediainit:
4168 *
4169 * Initialize media for use on 1000BASE-X devices.
4170 */
4171 static void
4172 wm_tbi_mediainit(struct wm_softc *sc)
4173 {
4174 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4175 const char *sep = "";
4176
4177 if (sc->sc_type < WM_T_82543)
4178 sc->sc_tipg = TIPG_WM_DFLT;
4179 else
4180 sc->sc_tipg = TIPG_LG_DFLT;
4181
4182 sc->sc_tbi_anegticks = 5;
4183
4184 /* Initialize our media structures */
4185 sc->sc_mii.mii_ifp = ifp;
4186
4187 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4188 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4189 wm_tbi_mediastatus);
4190
4191 /*
4192 * SWD Pins:
4193 *
4194 * 0 = Link LED (output)
4195 * 1 = Loss Of Signal (input)
4196 */
4197 sc->sc_ctrl |= CTRL_SWDPIO(0);
4198 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4199
4200 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4201
4202 #define ADD(ss, mm, dd) \
4203 do { \
4204 aprint_normal("%s%s", sep, ss); \
4205 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4206 sep = ", "; \
4207 } while (/*CONSTCOND*/0)
4208
4209 aprint_normal_dev(sc->sc_dev, "");
4210 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4211 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4212 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4213 aprint_normal("\n");
4214
4215 #undef ADD
4216
4217 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4218 }
4219
4220 /*
4221 * wm_tbi_mediastatus: [ifmedia interface function]
4222 *
4223 * Get the current interface media status on a 1000BASE-X device.
4224 */
4225 static void
4226 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4227 {
4228 struct wm_softc *sc = ifp->if_softc;
4229 uint32_t ctrl, status;
4230
4231 ifmr->ifm_status = IFM_AVALID;
4232 ifmr->ifm_active = IFM_ETHER;
4233
4234 status = CSR_READ(sc, WMREG_STATUS);
4235 if ((status & STATUS_LU) == 0) {
4236 ifmr->ifm_active |= IFM_NONE;
4237 return;
4238 }
4239
4240 ifmr->ifm_status |= IFM_ACTIVE;
4241 ifmr->ifm_active |= IFM_1000_SX;
4242 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4243 ifmr->ifm_active |= IFM_FDX;
4244 ctrl = CSR_READ(sc, WMREG_CTRL);
4245 if (ctrl & CTRL_RFCE)
4246 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4247 if (ctrl & CTRL_TFCE)
4248 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4249 }
4250
4251 /*
4252 * wm_tbi_mediachange: [ifmedia interface function]
4253 *
4254 * Set hardware to newly-selected media on a 1000BASE-X device.
4255 */
4256 static int
4257 wm_tbi_mediachange(struct ifnet *ifp)
4258 {
4259 struct wm_softc *sc = ifp->if_softc;
4260 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4261 uint32_t status;
4262 int i;
4263
4264 sc->sc_txcw = 0;
4265 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4266 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4267 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4268 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4269 sc->sc_txcw |= TXCW_ANE;
4270 } else {
4271 /*
4272 * If autonegotiation is turned off, force link up and turn on
4273 * full duplex
4274 */
4275 sc->sc_txcw &= ~TXCW_ANE;
4276 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4277 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4278 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4279 delay(1000);
4280 }
4281
4282 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4283 device_xname(sc->sc_dev),sc->sc_txcw));
4284 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4285 delay(10000);
4286
4287 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4288 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4289
	/*
	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal; on the 82544 and earlier, signal
	 * presence is indicated by the pin reading 0 (cf. wm_check_for_link()).
	 */
4294 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4295 /* Have signal; wait for the link to come up. */
4296
4297 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4298 /*
4299 * Reset the link, and let autonegotiation do its thing
4300 */
4301 sc->sc_ctrl |= CTRL_LRST;
4302 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4303 delay(1000);
4304 sc->sc_ctrl &= ~CTRL_LRST;
4305 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4306 delay(1000);
4307 }
4308
4309 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4310 delay(10000);
4311 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4312 break;
4313 }
4314
4315 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4316 device_xname(sc->sc_dev),i));
4317
4318 status = CSR_READ(sc, WMREG_STATUS);
4319 DPRINTF(WM_DEBUG_LINK,
4320 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4321 device_xname(sc->sc_dev),status, STATUS_LU));
4322 if (status & STATUS_LU) {
4323 /* Link is up. */
4324 DPRINTF(WM_DEBUG_LINK,
4325 ("%s: LINK: set media -> link up %s\n",
4326 device_xname(sc->sc_dev),
4327 (status & STATUS_FD) ? "FDX" : "HDX"));
4328
4329 /*
			 * NOTE: the hardware updates TFCE and RFCE in CTRL
			 * automatically, so we should re-read CTRL into
			 * sc->sc_ctrl.
4332 */
4333 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4334 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4335 sc->sc_fcrtl &= ~FCRTL_XONE;
4336 if (status & STATUS_FD)
4337 sc->sc_tctl |=
4338 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4339 else
4340 sc->sc_tctl |=
4341 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4342 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4343 sc->sc_fcrtl |= FCRTL_XONE;
4344 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4345 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4346 WMREG_OLD_FCRTL : WMREG_FCRTL,
4347 sc->sc_fcrtl);
4348 sc->sc_tbi_linkup = 1;
4349 } else {
4350 if (i == WM_LINKUP_TIMEOUT)
4351 wm_check_for_link(sc);
4352 /* Link is down. */
4353 DPRINTF(WM_DEBUG_LINK,
4354 ("%s: LINK: set media -> link down\n",
4355 device_xname(sc->sc_dev)));
4356 sc->sc_tbi_linkup = 0;
4357 }
4358 } else {
4359 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4360 device_xname(sc->sc_dev)));
4361 sc->sc_tbi_linkup = 0;
4362 }
4363
4364 wm_tbi_set_linkled(sc);
4365
4366 return (0);
4367 }
4368
4369 /*
4370 * wm_tbi_set_linkled:
4371 *
4372 * Update the link LED on 1000BASE-X devices.
4373 */
4374 static void
4375 wm_tbi_set_linkled(struct wm_softc *sc)
4376 {
4377
4378 if (sc->sc_tbi_linkup)
4379 sc->sc_ctrl |= CTRL_SWDPIN(0);
4380 else
4381 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4382
4383 /* 82540 or newer devices are active low */
4384 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4385
4386 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4387 }
4388
4389 /*
4390 * wm_tbi_check_link:
4391 *
4392 * Check the link on 1000BASE-X devices.
4393 */
4394 static void
4395 wm_tbi_check_link(struct wm_softc *sc)
4396 {
4397 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4398 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4399 uint32_t rxcw, ctrl, status;
4400
4401 status = CSR_READ(sc, WMREG_STATUS);
4402
4403 rxcw = CSR_READ(sc, WMREG_RXCW);
4404 ctrl = CSR_READ(sc, WMREG_CTRL);
4405
4406 /* set link status */
4407 if ((status & STATUS_LU) == 0) {
4408 DPRINTF(WM_DEBUG_LINK,
4409 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4410 sc->sc_tbi_linkup = 0;
4411 } else if (sc->sc_tbi_linkup == 0) {
4412 DPRINTF(WM_DEBUG_LINK,
4413 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4414 (status & STATUS_FD) ? "FDX" : "HDX"));
4415 sc->sc_tbi_linkup = 1;
4416 }
4417
4418 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4419 && ((status & STATUS_LU) == 0)) {
4420 sc->sc_tbi_linkup = 0;
4421 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4422 /* RXCFG storm! */
4423 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4424 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4425 wm_init(ifp);
4426 wm_start(ifp);
4427 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4428 /* If the timer expired, retry autonegotiation */
4429 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4430 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4431 sc->sc_tbi_ticks = 0;
4432 /*
4433 * Reset the link, and let autonegotiation do
4434 * its thing
4435 */
4436 sc->sc_ctrl |= CTRL_LRST;
4437 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4438 delay(1000);
4439 sc->sc_ctrl &= ~CTRL_LRST;
4440 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4441 delay(1000);
4442 CSR_WRITE(sc, WMREG_TXCW,
4443 sc->sc_txcw & ~TXCW_ANE);
4444 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4445 }
4446 }
4447 }
4448
4449 wm_tbi_set_linkled(sc);
4450 }
4451
4452 /*
4453 * wm_gmii_reset:
4454 *
4455 * Reset the PHY.
4456 */
4457 static void
4458 wm_gmii_reset(struct wm_softc *sc)
4459 {
4460 uint32_t reg;
4461 int func = 0; /* XXX gcc */
4462
4463 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4464 || (sc->sc_type == WM_T_ICH10)) {
4465 if (wm_get_swfwhw_semaphore(sc)) {
4466 aprint_error_dev(sc->sc_dev,
4467 "%s: failed to get semaphore\n", __func__);
4468 return;
4469 }
4470 }
4471 if (sc->sc_type == WM_T_80003) {
4472 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4473 if (wm_get_swfw_semaphore(sc,
4474 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4475 aprint_error_dev(sc->sc_dev,
4476 "%s: failed to get semaphore\n", __func__);
4477 return;
4478 }
4479 }
4480 if (sc->sc_type >= WM_T_82544) {
4481 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4482 delay(20000);
4483
4484 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4485 delay(20000);
4486 } else {
4487 /*
4488 * With 82543, we need to force speed and duplex on the MAC
4489 * equal to what the PHY speed and duplex configuration is.
4490 * In addition, we need to perform a hardware reset on the PHY
4491 * to take it out of reset.
4492 */
4493 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4494 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4495
4496 /* The PHY reset pin is active-low. */
4497 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4498 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4499 CTRL_EXT_SWDPIN(4));
4500 reg |= CTRL_EXT_SWDPIO(4);
4501
4502 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4503 delay(10);
4504
4505 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4506 delay(10000);
4507
4508 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4509 delay(10);
4510 #if 0
4511 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4512 #endif
4513 }
4514 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4515 || (sc->sc_type == WM_T_ICH10))
4516 wm_put_swfwhw_semaphore(sc);
4517 if (sc->sc_type == WM_T_80003)
4518 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4519 }
4520
4521 /*
4522 * wm_gmii_mediainit:
4523 *
4524 * Initialize media for use on 1000BASE-T devices.
4525 */
4526 static void
4527 wm_gmii_mediainit(struct wm_softc *sc)
4528 {
4529 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4530
4531 /* We have MII. */
4532 sc->sc_flags |= WM_F_HAS_MII;
4533
4534 if (sc->sc_type == WM_T_80003)
4535 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4536 else
4537 sc->sc_tipg = TIPG_1000T_DFLT;
4538
4539 /*
4540 * Let the chip set speed/duplex on its own based on
4541 * signals from the PHY.
4542 * XXXbouyer - I'm not sure this is right for the 80003,
4543 * the em driver only sets CTRL_SLU here - but it seems to work.
4544 */
4545 sc->sc_ctrl |= CTRL_SLU;
4546 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4547
4548 /* Initialize our media structures and probe the GMII. */
4549 sc->sc_mii.mii_ifp = ifp;
4550
4551 if (sc->sc_type == WM_T_ICH10) {
4552 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4553 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4554 } else if (sc->sc_type >= WM_T_80003) {
4555 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4556 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4557 } else if (sc->sc_type >= WM_T_82544) {
4558 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4559 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4560 } else {
4561 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4562 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4563 }
4564 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4565
4566 wm_gmii_reset(sc);
4567
4568 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4569 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4570 wm_gmii_mediastatus);
4571
4572 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4573 MII_OFFSET_ANY, MIIF_DOPAUSE);
4574 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4575 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4576 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4577 } else
4578 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4579 }
4580
4581 /*
4582 * wm_gmii_mediastatus: [ifmedia interface function]
4583 *
4584 * Get the current interface media status on a 1000BASE-T device.
4585 */
4586 static void
4587 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4588 {
4589 struct wm_softc *sc = ifp->if_softc;
4590
4591 ether_mediastatus(ifp, ifmr);
4592 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4593 sc->sc_flowflags;
4594 }
4595
4596 /*
4597 * wm_gmii_mediachange: [ifmedia interface function]
4598 *
4599 * Set hardware to newly-selected media on a 1000BASE-T device.
4600 */
4601 static int
4602 wm_gmii_mediachange(struct ifnet *ifp)
4603 {
4604 struct wm_softc *sc = ifp->if_softc;
4605 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4606 int rc;
4607
4608 if ((ifp->if_flags & IFF_UP) == 0)
4609 return 0;
4610
4611 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4612 sc->sc_ctrl |= CTRL_SLU;
4613 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4614 || (sc->sc_type > WM_T_82543)) {
4615 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4616 } else {
4617 sc->sc_ctrl &= ~CTRL_ASDE;
4618 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4619 if (ife->ifm_media & IFM_FDX)
4620 sc->sc_ctrl |= CTRL_FD;
		switch (IFM_SUBTYPE(ife->ifm_media)) {
4622 case IFM_10_T:
4623 sc->sc_ctrl |= CTRL_SPEED_10;
4624 break;
4625 case IFM_100_TX:
4626 sc->sc_ctrl |= CTRL_SPEED_100;
4627 break;
4628 case IFM_1000_T:
4629 sc->sc_ctrl |= CTRL_SPEED_1000;
4630 break;
4631 default:
4632 panic("wm_gmii_mediachange: bad media 0x%x",
4633 ife->ifm_media);
4634 }
4635 }
4636 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4637 if (sc->sc_type <= WM_T_82543)
4638 wm_gmii_reset(sc);
4639
4640 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4641 return 0;
4642 return rc;
4643 }
4644
4645 #define MDI_IO CTRL_SWDPIN(2)
4646 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4647 #define MDI_CLK CTRL_SWDPIN(3)
4648
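/*
 * i82543_mii_sendbits:
 *
 *	Bit-bang a value out to the PHY via the MDI pins in the CTRL
 *	register (i82543 only).
 */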
4649 static void
4650 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4651 {
4652 uint32_t i, v;
4653
4654 v = CSR_READ(sc, WMREG_CTRL);
4655 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4656 v |= MDI_DIR | CTRL_SWDPIO(3);
4657
4658 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4659 if (data & i)
4660 v |= MDI_IO;
4661 else
4662 v &= ~MDI_IO;
4663 CSR_WRITE(sc, WMREG_CTRL, v);
4664 delay(10);
4665 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4666 delay(10);
4667 CSR_WRITE(sc, WMREG_CTRL, v);
4668 delay(10);
4669 }
4670 }
4671
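/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang a 16-bit value in from the PHY via the MDI pins in the
 *	CTRL register (i82543 only).
 */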
4672 static uint32_t
4673 i82543_mii_recvbits(struct wm_softc *sc)
4674 {
4675 uint32_t v, i, data = 0;
4676
4677 v = CSR_READ(sc, WMREG_CTRL);
4678 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4679 v |= CTRL_SWDPIO(3);
4680
4681 CSR_WRITE(sc, WMREG_CTRL, v);
4682 delay(10);
4683 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4684 delay(10);
4685 CSR_WRITE(sc, WMREG_CTRL, v);
4686 delay(10);
4687
4688 for (i = 0; i < 16; i++) {
4689 data <<= 1;
4690 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4691 delay(10);
4692 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4693 data |= 1;
4694 CSR_WRITE(sc, WMREG_CTRL, v);
4695 delay(10);
4696 }
4697
4698 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4699 delay(10);
4700 CSR_WRITE(sc, WMREG_CTRL, v);
4701 delay(10);
4702
4703 return (data);
4704 }
4705
4706 #undef MDI_IO
4707 #undef MDI_DIR
4708 #undef MDI_CLK
4709
4710 /*
4711 * wm_gmii_i82543_readreg: [mii interface function]
4712 *
4713 * Read a PHY register on the GMII (i82543 version).
4714 */
4715 static int
4716 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4717 {
4718 struct wm_softc *sc = device_private(self);
4719 int rv;
4720
4721 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4722 i82543_mii_sendbits(sc, reg | (phy << 5) |
4723 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4724 rv = i82543_mii_recvbits(sc) & 0xffff;
4725
4726 DPRINTF(WM_DEBUG_GMII,
4727 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4728 device_xname(sc->sc_dev), phy, reg, rv));
4729
4730 return (rv);
4731 }
4732
4733 /*
4734 * wm_gmii_i82543_writereg: [mii interface function]
4735 *
4736 * Write a PHY register on the GMII (i82543 version).
4737 */
4738 static void
4739 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4740 {
4741 struct wm_softc *sc = device_private(self);
4742
4743 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4744 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4745 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4746 (MII_COMMAND_START << 30), 32);
4747 }
4748
4749 /*
4750 * wm_gmii_i82544_readreg: [mii interface function]
4751 *
4752 * Read a PHY register on the GMII.
4753 */
4754 static int
4755 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4756 {
4757 struct wm_softc *sc = device_private(self);
4758 uint32_t mdic = 0;
4759 int i, rv;
4760
4761 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4762 MDIC_REGADD(reg));
4763
4764 for (i = 0; i < 320; i++) {
4765 mdic = CSR_READ(sc, WMREG_MDIC);
4766 if (mdic & MDIC_READY)
4767 break;
4768 delay(10);
4769 }
4770
4771 if ((mdic & MDIC_READY) == 0) {
4772 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4773 device_xname(sc->sc_dev), phy, reg);
4774 rv = 0;
4775 } else if (mdic & MDIC_E) {
4776 #if 0 /* This is normal if no PHY is present. */
4777 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4778 device_xname(sc->sc_dev), phy, reg);
4779 #endif
4780 rv = 0;
4781 } else {
4782 rv = MDIC_DATA(mdic);
4783 if (rv == 0xffff)
4784 rv = 0;
4785 }
4786
4787 return (rv);
4788 }
4789
4790 /*
4791 * wm_gmii_i82544_writereg: [mii interface function]
4792 *
4793 * Write a PHY register on the GMII.
4794 */
4795 static void
4796 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4797 {
4798 struct wm_softc *sc = device_private(self);
4799 uint32_t mdic = 0;
4800 int i;
4801
4802 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4803 MDIC_REGADD(reg) | MDIC_DATA(val));
4804
4805 for (i = 0; i < 320; i++) {
4806 mdic = CSR_READ(sc, WMREG_MDIC);
4807 if (mdic & MDIC_READY)
4808 break;
4809 delay(10);
4810 }
4811
4812 if ((mdic & MDIC_READY) == 0)
4813 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4814 device_xname(sc->sc_dev), phy, reg);
4815 else if (mdic & MDIC_E)
4816 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4817 device_xname(sc->sc_dev), phy, reg);
4818 }
4819
4820 /*
4821 * wm_gmii_i80003_readreg: [mii interface function]
4822 *
 * Read a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
4826 */
4827 static int
4828 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4829 {
4830 struct wm_softc *sc = device_private(self);
4831 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4832 int rv;
4833
4834 if (phy != 1) /* only one PHY on kumeran bus */
4835 return 0;
4836
4837 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4838 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4839 __func__);
4840 return 0;
4841 }
4842
4843 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4844 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4845 reg >> GG82563_PAGE_SHIFT);
4846 } else {
4847 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4848 reg >> GG82563_PAGE_SHIFT);
4849 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register. */
4851 delay(200);
4852 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4853 delay(200);
4854
4855 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4856 return (rv);
4857 }
4858
4859 /*
4860 * wm_gmii_i80003_writereg: [mii interface function]
4861 *
 * Write a PHY register on the kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
4865 */
4866 static void
4867 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4868 {
4869 struct wm_softc *sc = device_private(self);
4870 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4871
4872 if (phy != 1) /* only one PHY on kumeran bus */
4873 return;
4874
4875 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4876 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4877 __func__);
4878 return;
4879 }
4880
4881 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4882 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4883 reg >> GG82563_PAGE_SHIFT);
4884 } else {
4885 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4886 reg >> GG82563_PAGE_SHIFT);
4887 }
	/* Wait another 200us to work around a ready-bit bug in the MDIC register. */
4889 delay(200);
4890 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4891 delay(200);
4892
4893 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4894 }
4895
4896 /*
4897 * wm_gmii_bm_readreg: [mii interface function]
4898 *
 * Read a PHY register on the BM PHY (used on the ICH10).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
4902 */
4903 static int
4904 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4905 {
4906 struct wm_softc *sc = device_private(self);
4907 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4908 int rv;
4909
4910 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4911 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4912 __func__);
4913 return 0;
4914 }
4915
	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
4925
4926 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4927 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4928 return (rv);
4929 }
4930
4931 /*
4932 * wm_gmii_bm_writereg: [mii interface function]
4933 *
 * Write a PHY register on the BM PHY (used on the ICH10).
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
4937 */
4938 static void
4939 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4940 {
4941 struct wm_softc *sc = device_private(self);
4942 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4943
4944 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4945 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4946 __func__);
4947 return;
4948 }
4949
	if (reg > GG82563_MAX_REG_ADDRESS) {
		if (phy == 1)
			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
		else
			wm_gmii_i82544_writereg(self, phy,
			    GG82563_PHY_PAGE_SELECT,
			    reg >> GG82563_PAGE_SHIFT);
	}
4959
4960 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4961 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4962 }
4963
4964 /*
4965 * wm_gmii_statchg: [mii interface function]
4966 *
4967 * Callback from MII layer when media changes.
4968 */
4969 static void
4970 wm_gmii_statchg(device_t self)
4971 {
4972 struct wm_softc *sc = device_private(self);
4973 struct mii_data *mii = &sc->sc_mii;
4974
4975 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4976 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4977 sc->sc_fcrtl &= ~FCRTL_XONE;
4978
4979 /*
4980 * Get flow control negotiation result.
4981 */
4982 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4983 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4984 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4985 mii->mii_media_active &= ~IFM_ETH_FMASK;
4986 }
4987
4988 if (sc->sc_flowflags & IFM_FLOW) {
4989 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4990 sc->sc_ctrl |= CTRL_TFCE;
4991 sc->sc_fcrtl |= FCRTL_XONE;
4992 }
4993 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4994 sc->sc_ctrl |= CTRL_RFCE;
4995 }
4996
4997 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4998 DPRINTF(WM_DEBUG_LINK,
4999 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
5000 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5001 } else {
5002 DPRINTF(WM_DEBUG_LINK,
5003 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
5004 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5005 }
5006
5007 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5008 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5009 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
5010 : WMREG_FCRTL, sc->sc_fcrtl);
5011 if (sc->sc_type == WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
5013 case IFM_1000_T:
5014 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5015 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
5016 sc->sc_tipg = TIPG_1000T_80003_DFLT;
5017 break;
5018 default:
5019 wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
5020 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
5021 sc->sc_tipg = TIPG_10_100_80003_DFLT;
5022 break;
5023 }
5024 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5025 }
5026 }
5027
5028 /*
5029 * wm_kmrn_readreg:
5030 *
5031 * Read a kumeran register
5032 */
5033 static int
5034 wm_kmrn_readreg(struct wm_softc *sc, int reg)
5035 {
5036 int rv;
5037
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5039 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5040 aprint_error_dev(sc->sc_dev,
5041 "%s: failed to get semaphore\n", __func__);
5042 return 0;
5043 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5045 if (wm_get_swfwhw_semaphore(sc)) {
5046 aprint_error_dev(sc->sc_dev,
5047 "%s: failed to get semaphore\n", __func__);
5048 return 0;
5049 }
5050 }
5051
5052 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5053 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5054 KUMCTRLSTA_REN);
5055 delay(2);
5056
5057 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5058
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5062 wm_put_swfwhw_semaphore(sc);
5063
5064 return (rv);
5065 }
5066
5067 /*
5068 * wm_kmrn_writereg:
5069 *
5070 * Write a kumeran register
5071 */
5072 static void
5073 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
5074 {
5075
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
5077 if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
5078 aprint_error_dev(sc->sc_dev,
5079 "%s: failed to get semaphore\n", __func__);
5080 return;
5081 }
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5083 if (wm_get_swfwhw_semaphore(sc)) {
5084 aprint_error_dev(sc->sc_dev,
5085 "%s: failed to get semaphore\n", __func__);
5086 return;
5087 }
5088 }
5089
5090 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5091 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5092 (val & KUMCTRLSTA_MASK));
5093
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5097 wm_put_swfwhw_semaphore(sc);
5098 }
5099
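/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Check whether the on-board NVM is an EEPROM; returns 0 if the
 *	82573/82574 NVM is flash-type, 1 otherwise.
 */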
5100 static int
5101 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5102 {
5103 uint32_t eecd = 0;
5104
5105 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5106 eecd = CSR_READ(sc, WMREG_EECD);
5107
5108 /* Isolate bits 15 & 16 */
5109 eecd = ((eecd >> 15) & 0x03);
5110
5111 /* If both bits are set, device is Flash type */
5112 if (eecd == 0x03) {
5113 return 0;
5114 }
5115 }
5116 return 1;
5117 }
5118
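/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software/firmware semaphore bit in the SWSM register.
 */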
5119 static int
5120 wm_get_swsm_semaphore(struct wm_softc *sc)
5121 {
5122 int32_t timeout;
5123 uint32_t swsm;
5124
5125 /* Get the FW semaphore. */
5126 timeout = 1000 + 1; /* XXX */
5127 while (timeout) {
5128 swsm = CSR_READ(sc, WMREG_SWSM);
5129 swsm |= SWSM_SWESMBI;
5130 CSR_WRITE(sc, WMREG_SWSM, swsm);
5131 /* if we managed to set the bit we got the semaphore. */
5132 swsm = CSR_READ(sc, WMREG_SWSM);
5133 if (swsm & SWSM_SWESMBI)
5134 break;
5135
5136 delay(50);
5137 timeout--;
5138 }
5139
5140 if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
5142 /* Release semaphores */
5143 wm_put_swsm_semaphore(sc);
5144 return 1;
5145 }
5146 return 0;
5147 }
5148
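/*
 * wm_put_swsm_semaphore:
 *
 *	Release the software/firmware semaphore bit in the SWSM register.
 */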
5149 static void
5150 wm_put_swsm_semaphore(struct wm_softc *sc)
5151 {
5152 uint32_t swsm;
5153
5154 swsm = CSR_READ(sc, WMREG_SWSM);
5155 swsm &= ~(SWSM_SWESMBI);
5156 CSR_WRITE(sc, WMREG_SWSM, swsm);
5157 }
5158
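/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software half of a software/firmware semaphore in the
 *	SW_FW_SYNC register for the resources selected by mask.
 */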
5159 static int
5160 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5161 {
5162 uint32_t swfw_sync;
5163 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5164 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5168 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5169 if (wm_get_swsm_semaphore(sc)) {
5170 aprint_error_dev(sc->sc_dev,
5171 "%s: failed to get semaphore\n",
5172 __func__);
5173 return 1;
5174 }
5175 }
5176 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5177 if ((swfw_sync & (swmask | fwmask)) == 0) {
5178 swfw_sync |= swmask;
5179 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5180 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5181 wm_put_swsm_semaphore(sc);
5182 return 0;
5183 }
5184 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5185 wm_put_swsm_semaphore(sc);
5186 delay(5000);
5187 }
5188 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5189 device_xname(sc->sc_dev), mask, swfw_sync);
5190 return 1;
5191 }
5192
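/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software half of a software/firmware semaphore in the
 *	SW_FW_SYNC register.
 */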
5193 static void
5194 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5195 {
5196 uint32_t swfw_sync;
5197
5198 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5199 while (wm_get_swsm_semaphore(sc) != 0)
5200 continue;
5201 }
5202 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5203 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5204 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5205 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5206 wm_put_swsm_semaphore(sc);
5207 }
5208
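/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the software flag in the EXTCNFCTR register (used on the
 *	ICH8 and newer).
 */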
5209 static int
5210 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5211 {
5212 uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
5216 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5217 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5218 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5219
5220 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5221 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5222 return 0;
5223 delay(5000);
5224 }
5225 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5226 device_xname(sc->sc_dev), ext_ctrl);
5227 return 1;
5228 }
5229
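/*
 * wm_put_swfwhw_semaphore:
 *
 *	Release the software flag in the EXTCNFCTR register.
 */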
5230 static void
5231 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5232 {
5233 uint32_t ext_ctrl;
5234 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5235 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5236 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5237 }
5238
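/*
 * wm_valid_nvm_bank_detect_ich8lan:
 *
 *	Detect which NVM bank (0 or 1) holds the valid image; returns -1
 *	if no valid signature is found.
 */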
5239 static int
5240 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5241 {
5242 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5243 uint8_t bank_high_byte;
5244 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5245
5246 if (sc->sc_type != WM_T_ICH10) {
5247 /* Value of bit 22 corresponds to the flash bank we're on. */
5248 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5249 } else {
5250 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5251 if ((bank_high_byte & 0xc0) == 0x80)
5252 *bank = 0;
5253 else {
5254 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5255 &bank_high_byte);
5256 if ((bank_high_byte & 0xc0) == 0x80)
5257 *bank = 1;
5258 else {
5259 aprint_error_dev(sc->sc_dev,
5260 "EEPROM not present\n");
5261 return -1;
5262 }
5263 }
5264 }
5265
5266 return 0;
5267 }
5268
5269 /******************************************************************************
5270 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5271 * register.
5272 *
 * sc - Struct containing variables accessed by shared code
 * offset - offset of the word in the EEPROM to read
 * words - number of words to read
 * data - word(s) read from the EEPROM
5277 *****************************************************************************/
5278 static int
5279 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5280 {
5281 int32_t error = 0;
5282 uint32_t flash_bank = 0;
5283 uint32_t act_offset = 0;
5284 uint32_t bank_offset = 0;
5285 uint16_t word = 0;
5286 uint16_t i = 0;
5287
5288 /* We need to know which is the valid flash bank. In the event
5289 * that we didn't allocate eeprom_shadow_ram, we may not be
5290 * managing flash_bank. So it cannot be trusted and needs
5291 * to be updated with each read.
5292 */
5293 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5294 if (error) {
5295 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5296 __func__);
5297 return error;
5298 }
5299
	/* Adjust the offset for bank 1; the bank size is in words, so convert to bytes. */
5301 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5302
5303 error = wm_get_swfwhw_semaphore(sc);
5304 if (error) {
5305 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5306 __func__);
5307 return error;
5308 }
5309
5310 for (i = 0; i < words; i++) {
5311 /* The NVM part needs a byte offset, hence * 2 */
5312 act_offset = bank_offset + ((offset + i) * 2);
5313 error = wm_read_ich8_word(sc, act_offset, &word);
5314 if (error) {
5315 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5316 __func__);
5317 break;
5318 }
5319 data[i] = word;
5320 }
5321
5322 wm_put_swfwhw_semaphore(sc);
5323 return error;
5324 }
5325
5326 /******************************************************************************
5327 * This function does initial flash setup so that a new read/write/erase cycle
5328 * can be started.
5329 *
5330 * sc - The pointer to the hw structure
5331 ****************************************************************************/
5332 static int32_t
5333 wm_ich8_cycle_init(struct wm_softc *sc)
5334 {
5335 uint16_t hsfsts;
5336 int32_t error = 1;
5337 int32_t i = 0;
5338
5339 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5340
	/* Maybe we should check the Flash Descriptor Valid bit in HW status. */
5342 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5343 return error;
5344 }
5345
	/* Clear FCERR and DAEL in HW status by writing 1s. */
5348 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5349
5350 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5351
	/* Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads 1 after a
	 * hardware reset, which could then be used to tell whether a cycle
	 * is in progress or has completed.  We should also have some
	 * software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that accesses to those bits by two
	 * threads are serialized, and so that two threads don't start a
	 * cycle at the same time. */
5360
5361 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/* No cycle is running at present; begin by setting Flash Cycle Done. */
5364 hsfsts |= HSFSTS_DONE;
5365 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5366 error = 0;
5367 } else {
		/* Otherwise poll for some time so the current cycle has a
		 * chance to end before giving up. */
5370 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5371 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5372 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5373 error = 0;
5374 break;
5375 }
5376 delay(1);
5377 }
5378 if (error == 0) {
			/* The previous cycle ended within the timeout, so
			 * now set the Flash Cycle Done bit. */
5381 hsfsts |= HSFSTS_DONE;
5382 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5383 }
5384 }
5385 return error;
5386 }
5387
5388 /******************************************************************************
5389 * This function starts a flash cycle and waits for its completion
5390 *
5391 * sc - The pointer to the hw structure
5392 ****************************************************************************/
5393 static int32_t
5394 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5395 {
5396 uint16_t hsflctl;
5397 uint16_t hsfsts;
5398 int32_t error = 1;
5399 uint32_t i = 0;
5400
5401 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5402 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5403 hsflctl |= HSFCTL_GO;
5404 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5405
	/* Wait until the FDONE bit is set. */
5407 do {
5408 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5409 if (hsfsts & HSFSTS_DONE)
5410 break;
5411 delay(1);
5412 i++;
5413 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5415 error = 0;
5416 }
5417 return error;
5418 }
5419
5420 /******************************************************************************
5421 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5422 *
5423 * sc - The pointer to the hw structure
5424 * index - The index of the byte or word to read.
5425 * size - Size of data to read, 1=byte 2=word
5426 * data - Pointer to the word to store the value read.
5427 *****************************************************************************/
5428 static int32_t
5429 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5430 uint32_t size, uint16_t* data)
5431 {
5432 uint16_t hsfsts;
5433 uint16_t hsflctl;
5434 uint32_t flash_linear_address;
5435 uint32_t flash_data = 0;
5436 int32_t error = 1;
5437 int32_t count = 0;
5438
	if (size < 1 || size > 2 || data == NULL ||
5440 index > ICH_FLASH_LINEAR_ADDR_MASK)
5441 return error;
5442
5443 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5444 sc->sc_ich8_flash_base;
5445
5446 do {
5447 delay(1);
5448 /* Steps */
5449 error = wm_ich8_cycle_init(sc);
5450 if (error)
5451 break;
5452
5453 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5454 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5455 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5456 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5457 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5458
		/* Write the last 24 bits of the index into the Flash Linear
		 * Address field of the Flash Address register. */
		/* TODO: maybe check the index against the size of the flash */
5462
5463 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5464
5465 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5466
		/* If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read (shift in) the data from
		 * Flash Data0, least-significant byte first. */
5470 if (error == 0) {
5471 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5472 if (size == 1) {
5473 *data = (uint8_t)(flash_data & 0x000000FF);
5474 } else if (size == 2) {
5475 *data = (uint16_t)(flash_data & 0x0000FFFF);
5476 }
5477 break;
5478 } else {
5479 /* If we've gotten here, then things are probably completely hosed,
5480 * but if the error condition is detected, it won't hurt to give
5481 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5482 */
5483 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5484 if (hsfsts & HSFSTS_ERR) {
5485 /* Repeat for some time before giving up. */
5486 continue;
5487 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5488 break;
5489 }
5490 }
5491 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5492
5493 return error;
5494 }
5495
5496 /******************************************************************************
5497 * Reads a single byte from the NVM using the ICH8 flash access registers.
5498 *
5499 * sc - pointer to wm_hw structure
5500 * index - The index of the byte to read.
5501 * data - Pointer to a byte to store the value read.
5502 *****************************************************************************/
5503 static int32_t
5504 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5505 {
5506 int32_t status;
5507 uint16_t word = 0;
5508
5509 status = wm_read_ich8_data(sc, index, 1, &word);
5510 if (status == 0) {
5511 *data = (uint8_t)word;
5512 }
5513
5514 return status;
5515 }
5516
5517 /******************************************************************************
5518 * Reads a word from the NVM using the ICH8 flash access registers.
5519 *
5520 * sc - pointer to wm_hw structure
5521 * index - The starting byte index of the word to read.
5522 * data - Pointer to a word to store the value read.
5523 *****************************************************************************/
5524 static int32_t
5525 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5526 {
5527 int32_t status;
5528
5529 status = wm_read_ich8_data(sc, index, 2, data);
5530 return status;
5531 }
5532
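/*
 * wm_check_mng_mode:
 *
 *	Check whether the management firmware mode is enabled; returns
 *	non-zero if it is.
 */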
5533 static int
5534 wm_check_mng_mode(struct wm_softc *sc)
5535 {
5536 int rv;
5537
5538 switch (sc->sc_type) {
5539 case WM_T_ICH8:
5540 case WM_T_ICH9:
5541 case WM_T_ICH10:
5542 rv = wm_check_mng_mode_ich8lan(sc);
5543 break;
5544 #if 0
5545 case WM_T_82574:
5546 /*
		 * The function is provided in the em driver, but it's not
		 * used. Why?
5549 */
5550 rv = wm_check_mng_mode_82574(sc);
5551 break;
5552 #endif
5553 case WM_T_82571:
5554 case WM_T_82572:
5555 case WM_T_82573:
5556 case WM_T_80003:
5557 rv = wm_check_mng_mode_generic(sc);
5558 break;
5559 default:
		/* nothing to do */
5561 rv = 0;
5562 break;
5563 }
5564
5565 return rv;
5566 }
5567
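/*
 * wm_check_mng_mode_ich8lan:
 *
 *	Check the management mode via the FWSM register (ICH variant).
 */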
5568 static int
5569 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5570 {
5571 uint32_t fwsm;
5572
5573 fwsm = CSR_READ(sc, WMREG_FWSM);
5574
5575 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5576 return 1;
5577
5578 return 0;
5579 }
5580
5581 #if 0
5582 static int
5583 wm_check_mng_mode_82574(struct wm_softc *sc)
5584 {
5585 uint16_t data;
5586
5587 wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5588
5589 if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5590 return 1;
5591
5592 return 0;
5593 }
5594 #endif
5595
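/*
 * wm_check_mng_mode_generic:
 *
 *	Check the management mode via the FWSM register (generic variant).
 */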
5596 static int
5597 wm_check_mng_mode_generic(struct wm_softc *sc)
5598 {
5599 uint32_t fwsm;
5600
5601 fwsm = CSR_READ(sc, WMREG_FWSM);
5602
5603 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5604 return 1;
5605
5606 return 0;
5607 }
5608
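/*
 * wm_get_hw_control:
 *
 *	Tell the firmware that the driver has taken over control of the
 *	hardware by setting the DRV_LOAD bit.
 */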
5609 static void
5610 wm_get_hw_control(struct wm_softc *sc)
5611 {
5612 uint32_t reg;
5613
5614 switch (sc->sc_type) {
5615 case WM_T_82573:
5616 #if 0
5617 case WM_T_82574:
5618 /*
	 * FreeBSD's em driver has a function for the 82574 to check
	 * the management mode, but it's not used. Why?
5621 */
5622 #endif
5623 reg = CSR_READ(sc, WMREG_SWSM);
5624 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5625 break;
5626 case WM_T_82571:
5627 case WM_T_82572:
5628 case WM_T_80003:
5629 case WM_T_ICH8:
5630 case WM_T_ICH9:
5631 case WM_T_ICH10:
5632 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5633 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5634 break;
5635 default:
5636 break;
5637 }
5638 }
5639
5640 /* XXX Currently TBI only */
5641 static int
5642 wm_check_for_link(struct wm_softc *sc)
5643 {
5644 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5645 uint32_t rxcw;
5646 uint32_t ctrl;
5647 uint32_t status;
5648 uint32_t sig;
5649
5650 rxcw = CSR_READ(sc, WMREG_RXCW);
5651 ctrl = CSR_READ(sc, WMREG_CTRL);
5652 status = CSR_READ(sc, WMREG_STATUS);
5653
5654 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5655
5656 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5657 device_xname(sc->sc_dev), __func__,
5658 ((ctrl & CTRL_SWDPIN(1)) == sig),
5659 ((status & STATUS_LU) != 0),
5660 ((rxcw & RXCW_C) != 0)
5661 ));
5662
5663 /*
5664 * SWDPIN LU RXCW
5665 * 0 0 0
5666 * 0 0 1 (should not happen)
5667 * 0 1 0 (should not happen)
5668 * 0 1 1 (should not happen)
5669 * 1 0 0 Disable autonego and force linkup
5670 * 1 0 1 got /C/ but not linkup yet
5671 * 1 1 0 (linkup)
5672 * 1 1 1 If IFM_AUTO, back to autonego
5673 *
5674 */
5675 if (((ctrl & CTRL_SWDPIN(1)) == sig)
5676 && ((status & STATUS_LU) == 0)
5677 && ((rxcw & RXCW_C) == 0)) {
5678 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5679 __func__));
5680 sc->sc_tbi_linkup = 0;
5681 /* Disable auto-negotiation in the TXCW register */
5682 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5683
5684 /*
5685 * Force link-up and also force full-duplex.
5686 *
		 * NOTE: TFCE and RFCE in CTRL were updated by the hardware
		 * automatically, so we should update sc->sc_ctrl.
5689 */
5690 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5691 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	} else if (((status & STATUS_LU) != 0)
5693 && ((rxcw & RXCW_C) != 0)
5694 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5695 sc->sc_tbi_linkup = 1;
5696 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5697 __func__));
5698 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5699 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5700 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5701 && ((rxcw & RXCW_C) != 0)) {
5702 DPRINTF(WM_DEBUG_LINK, ("/C/"));
5703 } else {
5704 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5705 status));
5706 }
5707
5708 return 0;
5709 }
5710