/*	$NetBSD: if_wm.c,v 1.176 2009/07/13 23:31:19 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.176 2009/07/13 23:31:19 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
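
/*
 * Example only (not driver code): the power-of-two ring sizes above let
 * an index wrap with a mask instead of a modulo.  "idx" and "nfree"
 * below are hypothetical locals.
 */
#if 0
	int idx = 0, nfree = WM_NTXDESC(sc);

	nfree--;			/* consume one descriptor */
	idx = WM_NEXTTX(sc, idx);	/* wraps to 0 past WM_NTXDESC(sc) - 1 */
#endif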

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
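
/*
 * For example, WM_CDTXOFF(5) is just offsetof(struct
 * wm_control_data_82544, wcd_txdescs[5]); because the whole clump maps
 * to a single DMA segment, adding that offset to the segment's base
 * address yields the bus address of the descriptor (see WM_CDTXADDR()
 * below).
 */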

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_82574,			/* i82574 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
	WM_T_ICH10,			/* ICH10 LAN */
} wm_chip_type;

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
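
/*
 * Illustrative use of the two macros above (m1 and m2 are hypothetical
 * mbufs): keeping a pointer to the last m_next field makes appending
 * to the Rx chain O(1).
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, sc_rxlen == 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2, sc_rxtail == m2 */
#endif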

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware-HW synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
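
/*
 * Typical read-modify-write with the CSR macros above (illustrative
 * sketch only); the trailing read of STATUS forces the posted write
 * out to the chip.
 */
#if 0
	uint32_t ctrl = CSR_READ(sc, WMREG_CTRL);

	CSR_WRITE(sc, WMREG_CTRL, ctrl | CTRL_SLU);
	CSR_WRITE_FLUSH(sc);
#endif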

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
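
/*
 * Example: with a 64-bit bus_addr_t, an address such as 0x123456789
 * splits into LO 0x23456789 and HI 0x1 for the two 32-bit descriptor
 * address words; on 32-bit platforms HI is simply the constant 0.
 */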

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
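
/*
 * Example of the "scoot" above: an Ethernet header is 14 bytes, so with
 * sc_align_tweak == 2 the IP header lands at buffer offset 16, a 4-byte
 * boundary.  With a tweak of 0 it would land at offset 14, and
 * strict-alignment CPUs would fault on word-sized loads from it.
 */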

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
#if 0
static int	wm_check_mng_mode_82574(struct wm_softc *);
#endif
static int	wm_check_mng_mode_generic(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);


/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "i82801J (LF) LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
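				/*
				 * MMRBC is a power-of-two encoding:
				 * n maps to (512 << n) bytes, i.e.
				 * 0..3 -> 512, 1024, 2048, 4096.  If
				 * the BIOS-programmed count exceeds
				 * the maximum the device advertises,
				 * it is clamped back down below.
				 */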
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
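		/*
		 * GFPREG describes the gigabit NVM region in 4 KB flash
		 * sectors: bits 12:0 give the first sector, bits 28:16
		 * the last.  The region holds two NVM banks, so the byte
		 * size computed below is divided by 2 for a single bank
		 * and by sizeof(uint16_t) to express it in 16-bit words.
		 */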
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type == WM_T_82574)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Check again; some PCI-e parts fail the first attempt
		 * due to the link being in a sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
	    sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}
1608
1609 if (sc->sc_type >= WM_T_82571) {
1610 ifp->if_capabilities |= IFCAP_TSOv6;
1611 }
1612
1613 /*
1614 * Attach the interface.
1615 */
1616 if_attach(ifp);
1617 ether_ifattach(ifp, enaddr);
1618 #if NRND > 0
1619 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1620 #endif
1621
1622 #ifdef WM_EVENT_COUNTERS
1623 /* Attach event counters. */
1624 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1625 NULL, xname, "txsstall");
1626 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1627 NULL, xname, "txdstall");
1628 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1629 NULL, xname, "txfifo_stall");
1630 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1631 NULL, xname, "txdw");
1632 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1633 NULL, xname, "txqe");
1634 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1635 NULL, xname, "rxintr");
1636 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1637 NULL, xname, "linkintr");
1638
1639 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1640 NULL, xname, "rxipsum");
1641 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1642 NULL, xname, "rxtusum");
1643 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1644 NULL, xname, "txipsum");
1645 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1646 NULL, xname, "txtusum");
1647 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1648 NULL, xname, "txtusum6");
1649
1650 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1651 NULL, xname, "txtso");
1652 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1653 NULL, xname, "txtso6");
1654 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1655 NULL, xname, "txtsopain");
1656
1657 for (i = 0; i < WM_NTXSEGS; i++) {
1658 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1659 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1660 NULL, xname, wm_txseg_evcnt_names[i]);
1661 }
1662
1663 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1664 NULL, xname, "txdrop");
1665
1666 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1667 NULL, xname, "tu");
1668
1669 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1670 NULL, xname, "tx_xoff");
1671 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1672 NULL, xname, "tx_xon");
1673 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1674 NULL, xname, "rx_xoff");
1675 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1676 NULL, xname, "rx_xon");
1677 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1678 NULL, xname, "rx_macctl");
1679 #endif /* WM_EVENT_COUNTERS */
1680
1681 if (!pmf_device_register(self, NULL, NULL))
1682 aprint_error_dev(self, "couldn't establish power handler\n");
1683 else
1684 pmf_class_network_register(self, ifp);
1685
1686 return;
1687
1688 /*
1689 * Free any resources we've allocated during the failed attach
1690 * attempt. Do this in reverse order and fall through.
1691 */
1692 fail_5:
1693 for (i = 0; i < WM_NRXDESC; i++) {
1694 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1695 bus_dmamap_destroy(sc->sc_dmat,
1696 sc->sc_rxsoft[i].rxs_dmamap);
1697 }
1698 fail_4:
1699 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1700 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1701 bus_dmamap_destroy(sc->sc_dmat,
1702 sc->sc_txsoft[i].txs_dmamap);
1703 }
1704 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1705 fail_3:
1706 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1707 fail_2:
1708 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1709 cdata_size);
1710 fail_1:
1711 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1712 fail_0:
1713 return;
1714 }
1715
1716 /*
1717 * wm_tx_offload:
1718 *
1719 * Set up TCP/IP checksumming parameters for the
1720 * specified packet.
1721 */
1722 static int
1723 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1724 uint8_t *fieldsp)
1725 {
1726 struct mbuf *m0 = txs->txs_mbuf;
1727 struct livengood_tcpip_ctxdesc *t;
1728 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1729 uint32_t ipcse;
1730 struct ether_header *eh;
1731 int offset, iphl;
1732 uint8_t fields;
1733
1734 /*
1735 * XXX It would be nice if the mbuf pkthdr had offset
1736 * fields for the protocol headers.
1737 */
1738
1739 eh = mtod(m0, struct ether_header *);
1740 switch (htons(eh->ether_type)) {
1741 case ETHERTYPE_IP:
1742 case ETHERTYPE_IPV6:
1743 offset = ETHER_HDR_LEN;
1744 break;
1745
1746 case ETHERTYPE_VLAN:
1747 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1748 break;
1749
1750 default:
1751 /*
1752 * Don't support this protocol or encapsulation.
1753 */
1754 *fieldsp = 0;
1755 *cmdp = 0;
1756 return (0);
1757 }
1758
1759 if ((m0->m_pkthdr.csum_flags &
1760 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1761 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1762 } else {
1763 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1764 }
1765 ipcse = offset + iphl - 1;
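	/*
	 * E.g. for an untagged IPv4 frame with a minimal 20-byte IP
	 * header, offset = ETHER_HDR_LEN (14) and iphl = 20, so
	 * ipcse = 33: IPCSE names the last byte covered by the IP
	 * checksum, hence the inclusive "- 1".
	 */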
1766
1767 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1768 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1769 seg = 0;
1770 fields = 0;
1771
1772 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1773 int hlen = offset + iphl;
1774 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1775
1776 if (__predict_false(m0->m_len <
1777 (hlen + sizeof(struct tcphdr)))) {
1778 /*
1779 * TCP/IP headers are not in the first mbuf; we need
1780 * to do this the slow and painful way. Let's just
1781 * hope this doesn't happen very often.
1782 */
1783 struct tcphdr th;
1784
1785 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1786
1787 m_copydata(m0, hlen, sizeof(th), &th);
1788 if (v4) {
1789 struct ip ip;
1790
1791 m_copydata(m0, offset, sizeof(ip), &ip);
1792 ip.ip_len = 0;
1793 m_copyback(m0,
1794 offset + offsetof(struct ip, ip_len),
1795 sizeof(ip.ip_len), &ip.ip_len);
1796 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1797 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1798 } else {
1799 struct ip6_hdr ip6;
1800
1801 m_copydata(m0, offset, sizeof(ip6), &ip6);
1802 ip6.ip6_plen = 0;
1803 m_copyback(m0,
1804 offset + offsetof(struct ip6_hdr, ip6_plen),
1805 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1806 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1807 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1808 }
1809 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1810 sizeof(th.th_sum), &th.th_sum);
1811
1812 hlen += th.th_off << 2;
1813 } else {
1814 /*
1815 * TCP/IP headers are in the first mbuf; we can do
1816 * this the easy way.
1817 */
1818 struct tcphdr *th;
1819
1820 if (v4) {
1821 struct ip *ip =
1822 (void *)(mtod(m0, char *) + offset);
1823 th = (void *)(mtod(m0, char *) + hlen);
1824
1825 ip->ip_len = 0;
1826 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1827 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1828 } else {
1829 struct ip6_hdr *ip6 =
1830 (void *)(mtod(m0, char *) + offset);
1831 th = (void *)(mtod(m0, char *) + hlen);
1832
1833 ip6->ip6_plen = 0;
1834 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1835 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1836 }
1837 hlen += th->th_off << 2;
1838 }
1839
1840 if (v4) {
1841 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1842 cmdlen |= WTX_TCPIP_CMD_IP;
1843 } else {
1844 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1845 ipcse = 0;
1846 }
1847 cmd |= WTX_TCPIP_CMD_TSE;
1848 cmdlen |= WTX_TCPIP_CMD_TSE |
1849 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1850 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1851 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1852 }
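	/*
	 * For example, for an untagged IPv4 TSO packet with minimal
	 * 20-byte IP and TCP headers, hlen works out to
	 * 14 + 20 + 20 = 54, the length field OR'd into cmdlen is
	 * m_pkthdr.len - 54 (the TCP payload to be segmented), and
	 * segsz is typically 1460 for a 1500-byte MTU.
	 */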
1853
1854 /*
1855 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1856 * offload feature, if we load the context descriptor, we
1857 * MUST provide valid values for IPCSS and TUCSS fields.
1858 */
1859
1860 ipcs = WTX_TCPIP_IPCSS(offset) |
1861 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1862 WTX_TCPIP_IPCSE(ipcse);
1863 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1864 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1865 fields |= WTX_IXSM;
1866 }
1867
1868 offset += iphl;
1869
1870 if (m0->m_pkthdr.csum_flags &
1871 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1872 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1873 fields |= WTX_TXSM;
1874 tucs = WTX_TCPIP_TUCSS(offset) |
1875 WTX_TCPIP_TUCSO(offset +
1876 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1877 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1878 } else if ((m0->m_pkthdr.csum_flags &
1879 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1880 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1881 fields |= WTX_TXSM;
1882 tucs = WTX_TCPIP_TUCSS(offset) |
1883 WTX_TCPIP_TUCSO(offset +
1884 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1885 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1886 } else {
1887 /* Just initialize it to a valid TCP context. */
1888 tucs = WTX_TCPIP_TUCSS(offset) |
1889 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1890 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1891 }
1892
1893 /* Fill in the context descriptor. */
1894 t = (struct livengood_tcpip_ctxdesc *)
1895 &sc->sc_txdescs[sc->sc_txnext];
1896 t->tcpip_ipcs = htole32(ipcs);
1897 t->tcpip_tucs = htole32(tucs);
1898 t->tcpip_cmdlen = htole32(cmdlen);
1899 t->tcpip_seg = htole32(seg);
1900 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1901
1902 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1903 txs->txs_ndesc++;
1904
1905 *cmdp = cmd;
1906 *fieldsp = fields;
1907
1908 return (0);
1909 }
1910
1911 static void
1912 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1913 {
1914 struct mbuf *m;
1915 int i;
1916
1917 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1918 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1919 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1920 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1921 m->m_data, m->m_len, m->m_flags);
1922 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1923 i, i == 1 ? "" : "s");
1924 }
1925
1926 /*
1927 * wm_82547_txfifo_stall:
1928 *
1929 * Callout used to wait for the 82547 Tx FIFO to drain,
1930 * reset the FIFO pointers, and restart packet transmission.
1931 */
1932 static void
1933 wm_82547_txfifo_stall(void *arg)
1934 {
1935 struct wm_softc *sc = arg;
1936 int s;
1937
1938 s = splnet();
1939
1940 if (sc->sc_txfifo_stall) {
1941 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1942 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1943 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1944 /*
1945 * Packets have drained. Stop transmitter, reset
1946 * FIFO pointers, restart transmitter, and kick
1947 * the packet queue.
1948 */
1949 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1950 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1951 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1952 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1953 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1954 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1955 CSR_WRITE(sc, WMREG_TCTL, tctl);
1956 CSR_WRITE_FLUSH(sc);
1957
1958 sc->sc_txfifo_head = 0;
1959 sc->sc_txfifo_stall = 0;
1960 wm_start(&sc->sc_ethercom.ec_if);
1961 } else {
1962 /*
1963 * Still waiting for packets to drain; try again in
1964 * another tick.
1965 */
1966 callout_schedule(&sc->sc_txfifo_ch, 1);
1967 }
1968 }
1969
1970 splx(s);
1971 }
1972
1973 /*
1974 * wm_82547_txfifo_bugchk:
1975 *
1976 * Check for bug condition in the 82547 Tx FIFO. We need to
1977 * prevent enqueueing a packet that would wrap around the end
 * of the Tx FIFO ring buffer, otherwise the chip will croak.
1979 *
1980 * We do this by checking the amount of space before the end
1981 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1982 * the Tx FIFO, wait for all remaining packets to drain, reset
1983 * the internal FIFO pointers to the beginning, and restart
1984 * transmission on the interface.
1985 */
1986 #define WM_FIFO_HDR 0x10
1987 #define WM_82547_PAD_LEN 0x3e0
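/*
 * Worked example, assuming a 10KB Tx FIFO as set up by wm_reset() for
 * the 82547 (PBA_30K leaves 40K - 30K = 10K for transmit): a
 * 1514-byte frame occupies roundup(1514 + 16, 16) = 1536 bytes of
 * FIFO.  With sc_txfifo_head at 9000, space = 10240 - 9000 = 1240,
 * and since 1536 < 0x3e0 (992) + 1240, the frame is sent and the head
 * wraps to (9000 + 1536) - 10240 = 296.  A long enough packet would
 * instead trip the stall path above.
 */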
1988 static int
1989 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1990 {
1991 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1992 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1993
1994 /* Just return if already stalled. */
1995 if (sc->sc_txfifo_stall)
1996 return (1);
1997
1998 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1999 /* Stall only occurs in half-duplex mode. */
2000 goto send_packet;
2001 }
2002
2003 if (len >= WM_82547_PAD_LEN + space) {
2004 sc->sc_txfifo_stall = 1;
2005 callout_schedule(&sc->sc_txfifo_ch, 1);
2006 return (1);
2007 }
2008
2009 send_packet:
2010 sc->sc_txfifo_head += len;
2011 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2012 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2013
2014 return (0);
2015 }
2016
2017 /*
2018 * wm_start: [ifnet interface function]
2019 *
2020 * Start packet transmission on the interface.
2021 */
2022 static void
2023 wm_start(struct ifnet *ifp)
2024 {
2025 struct wm_softc *sc = ifp->if_softc;
2026 struct mbuf *m0;
2027 struct m_tag *mtag;
2028 struct wm_txsoft *txs;
2029 bus_dmamap_t dmamap;
2030 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2031 bus_addr_t curaddr;
2032 bus_size_t seglen, curlen;
2033 uint32_t cksumcmd;
2034 uint8_t cksumfields;
2035
2036 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2037 return;
2038
2039 /*
2040 * Remember the previous number of free descriptors.
2041 */
2042 ofree = sc->sc_txfree;
2043
2044 /*
2045 * Loop through the send queue, setting up transmit descriptors
2046 * until we drain the queue, or use up all available transmit
2047 * descriptors.
2048 */
2049 for (;;) {
2050 /* Grab a packet off the queue. */
2051 IFQ_POLL(&ifp->if_snd, m0);
2052 if (m0 == NULL)
2053 break;
2054
2055 DPRINTF(WM_DEBUG_TX,
2056 ("%s: TX: have packet to transmit: %p\n",
2057 device_xname(sc->sc_dev), m0));
2058
2059 /* Get a work queue entry. */
2060 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2061 wm_txintr(sc);
2062 if (sc->sc_txsfree == 0) {
2063 DPRINTF(WM_DEBUG_TX,
2064 ("%s: TX: no free job descriptors\n",
2065 device_xname(sc->sc_dev)));
2066 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2067 break;
2068 }
2069 }
2070
2071 txs = &sc->sc_txsoft[sc->sc_txsnext];
2072 dmamap = txs->txs_dmamap;
2073
2074 use_tso = (m0->m_pkthdr.csum_flags &
2075 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2076
2077 /*
2078 * So says the Linux driver:
2079 * The controller does a simple calculation to make sure
2080 * there is enough room in the FIFO before initiating the
2081 * DMA for each buffer. The calc is:
2082 * 4 = ceil(buffer len / MSS)
2083 * To make sure we don't overrun the FIFO, adjust the max
2084 * buffer len if the MSS drops.
2085 */
2086 dmamap->dm_maxsegsz =
2087 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2088 ? m0->m_pkthdr.segsz << 2
2089 : WTX_MAX_LEN;
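		/*
		 * E.g. with a typical TSO MSS (segsz) of 1460, each
		 * DMA segment is clamped to 4 * 1460 = 5840 bytes,
		 * keeping any single buffer within the room the
		 * controller's FIFO calculation assumes.
		 */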
2090
2091 /*
2092 * Load the DMA map. If this fails, the packet either
2093 * didn't fit in the allotted number of segments, or we
2094 * were short on resources. For the too-many-segments
2095 * case, we simply report an error and drop the packet,
2096 * since we can't sanely copy a jumbo packet to a single
2097 * buffer.
2098 */
2099 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2100 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2101 if (error) {
2102 if (error == EFBIG) {
2103 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2104 log(LOG_ERR, "%s: Tx packet consumes too many "
2105 "DMA segments, dropping...\n",
2106 device_xname(sc->sc_dev));
2107 IFQ_DEQUEUE(&ifp->if_snd, m0);
2108 wm_dump_mbuf_chain(sc, m0);
2109 m_freem(m0);
2110 continue;
2111 }
2112 /*
2113 * Short on resources, just stop for now.
2114 */
2115 DPRINTF(WM_DEBUG_TX,
2116 ("%s: TX: dmamap load failed: %d\n",
2117 device_xname(sc->sc_dev), error));
2118 break;
2119 }
2120
2121 segs_needed = dmamap->dm_nsegs;
2122 if (use_tso) {
2123 /* For sentinel descriptor; see below. */
2124 segs_needed++;
2125 }
2126
2127 /*
2128 * Ensure we have enough descriptors free to describe
2129 * the packet. Note, we always reserve one descriptor
2130 * at the end of the ring due to the semantics of the
2131 * TDT register, plus one more in the event we need
2132 * to load offload context.
2133 */
2134 if (segs_needed > sc->sc_txfree - 2) {
2135 /*
2136 * Not enough free descriptors to transmit this
2137 * packet. We haven't committed anything yet,
2138 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
2140 * layer that there are no more slots left.
2141 */
2142 DPRINTF(WM_DEBUG_TX,
2143 ("%s: TX: need %d (%d) descriptors, have %d\n",
2144 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2145 segs_needed, sc->sc_txfree - 1));
2146 ifp->if_flags |= IFF_OACTIVE;
2147 bus_dmamap_unload(sc->sc_dmat, dmamap);
2148 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2149 break;
2150 }
2151
2152 /*
2153 * Check for 82547 Tx FIFO bug. We need to do this
2154 * once we know we can transmit the packet, since we
2155 * do some internal FIFO space accounting here.
2156 */
2157 if (sc->sc_type == WM_T_82547 &&
2158 wm_82547_txfifo_bugchk(sc, m0)) {
2159 DPRINTF(WM_DEBUG_TX,
2160 ("%s: TX: 82547 Tx FIFO bug detected\n",
2161 device_xname(sc->sc_dev)));
2162 ifp->if_flags |= IFF_OACTIVE;
2163 bus_dmamap_unload(sc->sc_dmat, dmamap);
2164 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2165 break;
2166 }
2167
2168 IFQ_DEQUEUE(&ifp->if_snd, m0);
2169
2170 /*
2171 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2172 */
2173
2174 DPRINTF(WM_DEBUG_TX,
2175 ("%s: TX: packet has %d (%d) DMA segments\n",
2176 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2177
2178 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2179
2180 /*
2181 * Store a pointer to the packet so that we can free it
2182 * later.
2183 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may be
2186 * incremented by 1 if we do checksum offload (a descriptor
2187 * is used to set the checksum context).
2188 */
2189 txs->txs_mbuf = m0;
2190 txs->txs_firstdesc = sc->sc_txnext;
2191 txs->txs_ndesc = segs_needed;
2192
2193 /* Set up offload parameters for this packet. */
2194 if (m0->m_pkthdr.csum_flags &
2195 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2196 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2197 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2198 if (wm_tx_offload(sc, txs, &cksumcmd,
2199 &cksumfields) != 0) {
2200 /* Error message already displayed. */
2201 bus_dmamap_unload(sc->sc_dmat, dmamap);
2202 continue;
2203 }
2204 } else {
2205 cksumcmd = 0;
2206 cksumfields = 0;
2207 }
2208
2209 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2210
2211 /* Sync the DMA map. */
2212 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2213 BUS_DMASYNC_PREWRITE);
2214
2215 /*
2216 * Initialize the transmit descriptor.
2217 */
2218 for (nexttx = sc->sc_txnext, seg = 0;
2219 seg < dmamap->dm_nsegs; seg++) {
2220 for (seglen = dmamap->dm_segs[seg].ds_len,
2221 curaddr = dmamap->dm_segs[seg].ds_addr;
2222 seglen != 0;
2223 curaddr += curlen, seglen -= curlen,
2224 nexttx = WM_NEXTTX(sc, nexttx)) {
2225 curlen = seglen;
2226
2227 /*
2228 * So says the Linux driver:
2229 * Work around for premature descriptor
2230 * write-backs in TSO mode. Append a
2231 * 4-byte sentinel descriptor.
2232 */
2233 if (use_tso &&
2234 seg == dmamap->dm_nsegs - 1 &&
2235 curlen > 8)
2236 curlen -= 4;
2237
2238 wm_set_dma_addr(
2239 &sc->sc_txdescs[nexttx].wtx_addr,
2240 curaddr);
2241 sc->sc_txdescs[nexttx].wtx_cmdlen =
2242 htole32(cksumcmd | curlen);
2243 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2244 0;
2245 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2246 cksumfields;
2247 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2248 lasttx = nexttx;
2249
2250 DPRINTF(WM_DEBUG_TX,
2251 ("%s: TX: desc %d: low 0x%08lx, "
2252 "len 0x%04x\n",
2253 device_xname(sc->sc_dev), nexttx,
2254 curaddr & 0xffffffffUL, (unsigned)curlen));
2255 }
2256 }
2257
2258 KASSERT(lasttx != -1);
2259
2260 /*
2261 * Set up the command byte on the last descriptor of
2262 * the packet. If we're in the interrupt delay window,
2263 * delay the interrupt.
2264 */
2265 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2266 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2267
2268 /*
2269 * If VLANs are enabled and the packet has a VLAN tag, set
2270 * up the descriptor to encapsulate the packet for us.
2271 *
2272 * This is only valid on the last descriptor of the packet.
2273 */
2274 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2275 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2276 htole32(WTX_CMD_VLE);
2277 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2278 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2279 }
2280
2281 txs->txs_lastdesc = lasttx;
2282
2283 DPRINTF(WM_DEBUG_TX,
2284 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2285 device_xname(sc->sc_dev),
2286 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2287
2288 /* Sync the descriptors we're using. */
2289 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2290 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2291
2292 /* Give the packet to the chip. */
2293 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2294
2295 DPRINTF(WM_DEBUG_TX,
2296 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2297
2298 DPRINTF(WM_DEBUG_TX,
2299 ("%s: TX: finished transmitting packet, job %d\n",
2300 device_xname(sc->sc_dev), sc->sc_txsnext));
2301
2302 /* Advance the tx pointer. */
2303 sc->sc_txfree -= txs->txs_ndesc;
2304 sc->sc_txnext = nexttx;
2305
2306 sc->sc_txsfree--;
2307 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2308
2309 #if NBPFILTER > 0
2310 /* Pass the packet to any BPF listeners. */
2311 if (ifp->if_bpf)
2312 bpf_mtap(ifp->if_bpf, m0);
2313 #endif /* NBPFILTER > 0 */
2314 }
2315
2316 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2317 /* No more slots; notify upper layer. */
2318 ifp->if_flags |= IFF_OACTIVE;
2319 }
2320
2321 if (sc->sc_txfree != ofree) {
2322 /* Set a watchdog timer in case the chip flakes out. */
2323 ifp->if_timer = 5;
2324 }
2325 }
2326
2327 /*
2328 * wm_watchdog: [ifnet interface function]
2329 *
2330 * Watchdog timer handler.
2331 */
2332 static void
2333 wm_watchdog(struct ifnet *ifp)
2334 {
2335 struct wm_softc *sc = ifp->if_softc;
2336
2337 /*
2338 * Since we're using delayed interrupts, sweep up
2339 * before we report an error.
2340 */
2341 wm_txintr(sc);
2342
2343 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2344 log(LOG_ERR,
2345 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2346 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2347 sc->sc_txnext);
2348 ifp->if_oerrors++;
2349
2350 /* Reset the interface. */
2351 (void) wm_init(ifp);
2352 }
2353
2354 /* Try to get more packets going. */
2355 wm_start(ifp);
2356 }
2357
2358 /*
2359 * wm_ioctl: [ifnet interface function]
2360 *
2361 * Handle control requests from the operator.
2362 */
2363 static int
2364 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2365 {
2366 struct wm_softc *sc = ifp->if_softc;
2367 struct ifreq *ifr = (struct ifreq *) data;
2368 struct ifaddr *ifa = (struct ifaddr *)data;
2369 struct sockaddr_dl *sdl;
2370 int s, error;
2371
2372 s = splnet();
2373
2374 switch (cmd) {
2375 case SIOCSIFMEDIA:
2376 case SIOCGIFMEDIA:
2377 /* Flow control requires full-duplex mode. */
2378 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2379 (ifr->ifr_media & IFM_FDX) == 0)
2380 ifr->ifr_media &= ~IFM_ETH_FMASK;
2381 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2382 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2383 /* We can do both TXPAUSE and RXPAUSE. */
2384 ifr->ifr_media |=
2385 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2386 }
2387 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2388 }
2389 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2390 break;
2391 case SIOCINITIFADDR:
2392 if (ifa->ifa_addr->sa_family == AF_LINK) {
2393 sdl = satosdl(ifp->if_dl->ifa_addr);
2394 (void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2395 LLADDR(satosdl(ifa->ifa_addr)),
2396 ifp->if_addrlen);
2397 /* unicast address is first multicast entry */
2398 wm_set_filter(sc);
2399 error = 0;
2400 break;
2401 }
2402 /* Fall through for rest */
2403 default:
2404 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2405 break;
2406
2407 error = 0;
2408
2409 if (cmd == SIOCSIFCAP)
2410 error = (*ifp->if_init)(ifp);
2411 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;	/* nothing to do */
2413 else if (ifp->if_flags & IFF_RUNNING) {
2414 /*
2415 * Multicast list has changed; set the hardware filter
2416 * accordingly.
2417 */
2418 wm_set_filter(sc);
2419 }
2420 break;
2421 }
2422
2423 /* Try to get more packets going. */
2424 wm_start(ifp);
2425
2426 splx(s);
2427 return (error);
2428 }
2429
2430 /*
2431 * wm_intr:
2432 *
2433 * Interrupt service routine.
2434 */
2435 static int
2436 wm_intr(void *arg)
2437 {
2438 struct wm_softc *sc = arg;
2439 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2440 uint32_t icr;
2441 int handled = 0;
2442
2443 while (1 /* CONSTCOND */) {
2444 icr = CSR_READ(sc, WMREG_ICR);
2445 if ((icr & sc->sc_icr) == 0)
2446 break;
2447 #if 0 /*NRND > 0*/
2448 if (RND_ENABLED(&sc->rnd_source))
2449 rnd_add_uint32(&sc->rnd_source, icr);
2450 #endif
2451
2452 handled = 1;
2453
2454 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2455 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2456 DPRINTF(WM_DEBUG_RX,
2457 ("%s: RX: got Rx intr 0x%08x\n",
2458 device_xname(sc->sc_dev),
2459 icr & (ICR_RXDMT0|ICR_RXT0)));
2460 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2461 }
2462 #endif
2463 wm_rxintr(sc);
2464
2465 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2466 if (icr & ICR_TXDW) {
2467 DPRINTF(WM_DEBUG_TX,
2468 ("%s: TX: got TXDW interrupt\n",
2469 device_xname(sc->sc_dev)));
2470 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2471 }
2472 #endif
2473 wm_txintr(sc);
2474
2475 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2476 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2477 wm_linkintr(sc, icr);
2478 }
2479
2480 if (icr & ICR_RXO) {
2481 ifp->if_ierrors++;
2482 #if defined(WM_DEBUG)
2483 log(LOG_WARNING, "%s: Receive overrun\n",
2484 device_xname(sc->sc_dev));
2485 #endif /* defined(WM_DEBUG) */
2486 }
2487 }
2488
2489 if (handled) {
2490 /* Try to get more packets going. */
2491 wm_start(ifp);
2492 }
2493
2494 return (handled);
2495 }
2496
2497 /*
2498 * wm_txintr:
2499 *
2500 * Helper; handle transmit interrupts.
2501 */
2502 static void
2503 wm_txintr(struct wm_softc *sc)
2504 {
2505 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2506 struct wm_txsoft *txs;
2507 uint8_t status;
2508 int i;
2509
2510 ifp->if_flags &= ~IFF_OACTIVE;
2511
2512 /*
2513 * Go through the Tx list and free mbufs for those
2514 * frames which have been transmitted.
2515 */
2516 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2517 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2518 txs = &sc->sc_txsoft[i];
2519
2520 DPRINTF(WM_DEBUG_TX,
2521 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2522
2523 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2524 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2525
2526 status =
2527 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2528 if ((status & WTX_ST_DD) == 0) {
2529 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2530 BUS_DMASYNC_PREREAD);
2531 break;
2532 }
2533
2534 DPRINTF(WM_DEBUG_TX,
2535 ("%s: TX: job %d done: descs %d..%d\n",
2536 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2537 txs->txs_lastdesc));
2538
2539 /*
2540 * XXX We should probably be using the statistics
2541 * XXX registers, but I don't know if they exist
2542 * XXX on chips before the i82544.
2543 */
2544
2545 #ifdef WM_EVENT_COUNTERS
2546 if (status & WTX_ST_TU)
2547 WM_EVCNT_INCR(&sc->sc_ev_tu);
2548 #endif /* WM_EVENT_COUNTERS */
2549
2550 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2551 ifp->if_oerrors++;
2552 if (status & WTX_ST_LC)
2553 log(LOG_WARNING, "%s: late collision\n",
2554 device_xname(sc->sc_dev));
2555 else if (status & WTX_ST_EC) {
2556 ifp->if_collisions += 16;
2557 log(LOG_WARNING, "%s: excessive collisions\n",
2558 device_xname(sc->sc_dev));
2559 }
2560 } else
2561 ifp->if_opackets++;
2562
2563 sc->sc_txfree += txs->txs_ndesc;
2564 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2565 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2566 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2567 m_freem(txs->txs_mbuf);
2568 txs->txs_mbuf = NULL;
2569 }
2570
2571 /* Update the dirty transmit buffer pointer. */
2572 sc->sc_txsdirty = i;
2573 DPRINTF(WM_DEBUG_TX,
2574 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2575
2576 /*
2577 * If there are no more pending transmissions, cancel the watchdog
2578 * timer.
2579 */
2580 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2581 ifp->if_timer = 0;
2582 }
2583
2584 /*
2585 * wm_rxintr:
2586 *
2587 * Helper; handle receive interrupts.
2588 */
2589 static void
2590 wm_rxintr(struct wm_softc *sc)
2591 {
2592 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2593 struct wm_rxsoft *rxs;
2594 struct mbuf *m;
2595 int i, len;
2596 uint8_t status, errors;
2597 uint16_t vlantag;
2598
2599 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2600 rxs = &sc->sc_rxsoft[i];
2601
2602 DPRINTF(WM_DEBUG_RX,
2603 ("%s: RX: checking descriptor %d\n",
2604 device_xname(sc->sc_dev), i));
2605
2606 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2607
2608 status = sc->sc_rxdescs[i].wrx_status;
2609 errors = sc->sc_rxdescs[i].wrx_errors;
2610 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2611 vlantag = sc->sc_rxdescs[i].wrx_special;
2612
2613 if ((status & WRX_ST_DD) == 0) {
2614 /*
2615 * We have processed all of the receive descriptors.
2616 */
2617 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2618 break;
2619 }
2620
2621 if (__predict_false(sc->sc_rxdiscard)) {
2622 DPRINTF(WM_DEBUG_RX,
2623 ("%s: RX: discarding contents of descriptor %d\n",
2624 device_xname(sc->sc_dev), i));
2625 WM_INIT_RXDESC(sc, i);
2626 if (status & WRX_ST_EOP) {
2627 /* Reset our state. */
2628 DPRINTF(WM_DEBUG_RX,
2629 ("%s: RX: resetting rxdiscard -> 0\n",
2630 device_xname(sc->sc_dev)));
2631 sc->sc_rxdiscard = 0;
2632 }
2633 continue;
2634 }
2635
2636 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2637 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2638
2639 m = rxs->rxs_mbuf;
2640
2641 /*
2642 * Add a new receive buffer to the ring, unless of
2643 * course the length is zero. Treat the latter as a
2644 * failed mapping.
2645 */
2646 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2647 /*
2648 * Failed, throw away what we've done so
2649 * far, and discard the rest of the packet.
2650 */
2651 ifp->if_ierrors++;
2652 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2653 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2654 WM_INIT_RXDESC(sc, i);
2655 if ((status & WRX_ST_EOP) == 0)
2656 sc->sc_rxdiscard = 1;
2657 if (sc->sc_rxhead != NULL)
2658 m_freem(sc->sc_rxhead);
2659 WM_RXCHAIN_RESET(sc);
2660 DPRINTF(WM_DEBUG_RX,
2661 ("%s: RX: Rx buffer allocation failed, "
2662 "dropping packet%s\n", device_xname(sc->sc_dev),
2663 sc->sc_rxdiscard ? " (discard)" : ""));
2664 continue;
2665 }
2666
2667 m->m_len = len;
2668 sc->sc_rxlen += len;
2669 DPRINTF(WM_DEBUG_RX,
2670 ("%s: RX: buffer at %p len %d\n",
2671 device_xname(sc->sc_dev), m->m_data, len));
2672
2673 /*
2674 * If this is not the end of the packet, keep
2675 * looking.
2676 */
2677 if ((status & WRX_ST_EOP) == 0) {
2678 WM_RXCHAIN_LINK(sc, m);
2679 DPRINTF(WM_DEBUG_RX,
2680 ("%s: RX: not yet EOP, rxlen -> %d\n",
2681 device_xname(sc->sc_dev), sc->sc_rxlen));
2682 continue;
2683 }
2684
2685 /*
2686 * Okay, we have the entire packet now. The chip is
2687 * configured to include the FCS (not all chips can
2688 * be configured to strip it), so we need to trim it.
2689 * May need to adjust length of previous mbuf in the
2690 * chain if the current mbuf is too short.
2691 */
2692 if (m->m_len < ETHER_CRC_LEN) {
2693 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2694 m->m_len = 0;
2695 } else {
2696 m->m_len -= ETHER_CRC_LEN;
2697 }
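		/*
		 * E.g. if the last mbuf carries only the final 2 bytes
		 * of the frame, those bytes are all FCS: that mbuf is
		 * zeroed and the remaining 2 FCS bytes are shaved off
		 * the previous mbuf (sc_rxtail) instead.
		 */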
2698 len = sc->sc_rxlen - ETHER_CRC_LEN;
2699
2700 WM_RXCHAIN_LINK(sc, m);
2701
2702 *sc->sc_rxtailp = NULL;
2703 m = sc->sc_rxhead;
2704
2705 WM_RXCHAIN_RESET(sc);
2706
2707 DPRINTF(WM_DEBUG_RX,
2708 ("%s: RX: have entire packet, len -> %d\n",
2709 device_xname(sc->sc_dev), len));
2710
2711 /*
2712 * If an error occurred, update stats and drop the packet.
2713 */
2714 if (errors &
2715 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2716 ifp->if_ierrors++;
2717 if (errors & WRX_ER_SE)
2718 log(LOG_WARNING, "%s: symbol error\n",
2719 device_xname(sc->sc_dev));
2720 else if (errors & WRX_ER_SEQ)
2721 log(LOG_WARNING, "%s: receive sequence error\n",
2722 device_xname(sc->sc_dev));
2723 else if (errors & WRX_ER_CE)
2724 log(LOG_WARNING, "%s: CRC error\n",
2725 device_xname(sc->sc_dev));
2726 m_freem(m);
2727 continue;
2728 }
2729
2730 /*
2731 * No errors. Receive the packet.
2732 */
2733 m->m_pkthdr.rcvif = ifp;
2734 m->m_pkthdr.len = len;
2735
2736 /*
2737 * If VLANs are enabled, VLAN packets have been unwrapped
2738 * for us. Associate the tag with the packet.
2739 */
2740 if ((status & WRX_ST_VP) != 0) {
2741 VLAN_INPUT_TAG(ifp, m,
2742 le16toh(vlantag),
2743 continue);
2744 }
2745
2746 /*
2747 * Set up checksum info for this packet.
2748 */
2749 if ((status & WRX_ST_IXSM) == 0) {
2750 if (status & WRX_ST_IPCS) {
2751 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2752 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2753 if (errors & WRX_ER_IPE)
2754 m->m_pkthdr.csum_flags |=
2755 M_CSUM_IPv4_BAD;
2756 }
2757 if (status & WRX_ST_TCPCS) {
2758 /*
2759 * Note: we don't know if this was TCP or UDP,
2760 * so we just set both bits, and expect the
2761 * upper layers to deal.
2762 */
2763 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2764 m->m_pkthdr.csum_flags |=
2765 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2766 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2767 if (errors & WRX_ER_TCPE)
2768 m->m_pkthdr.csum_flags |=
2769 M_CSUM_TCP_UDP_BAD;
2770 }
2771 }
2772
2773 ifp->if_ipackets++;
2774
2775 #if NBPFILTER > 0
2776 /* Pass this up to any BPF listeners. */
2777 if (ifp->if_bpf)
2778 bpf_mtap(ifp->if_bpf, m);
2779 #endif /* NBPFILTER > 0 */
2780
2781 /* Pass it on. */
2782 (*ifp->if_input)(ifp, m);
2783 }
2784
2785 /* Update the receive pointer. */
2786 sc->sc_rxptr = i;
2787
2788 DPRINTF(WM_DEBUG_RX,
2789 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2790 }
2791
2792 /*
2793 * wm_linkintr:
2794 *
2795 * Helper; handle link interrupts.
2796 */
2797 static void
2798 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2799 {
2800 uint32_t status;
2801
2802 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2803 __func__));
2804 /*
2805 * If we get a link status interrupt on a 1000BASE-T
2806 * device, just fall into the normal MII tick path.
2807 */
2808 if (sc->sc_flags & WM_F_HAS_MII) {
2809 if (icr & ICR_LSC) {
2810 DPRINTF(WM_DEBUG_LINK,
2811 ("%s: LINK: LSC -> mii_tick\n",
2812 device_xname(sc->sc_dev)));
2813 mii_tick(&sc->sc_mii);
2814 if (sc->sc_type == WM_T_82543) {
2815 int miistatus, active;
2816
2817 /*
				 * With the 82543, we need to force the MAC's
				 * speed and duplex to match the PHY's
				 * resolved speed and duplex configuration.
2821 */
2822 miistatus = sc->sc_mii.mii_media_status;
2823
2824 if (miistatus & IFM_ACTIVE) {
2825 active = sc->sc_mii.mii_media_active;
2826 sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2827 | CTRL_FD);
2828 switch (IFM_SUBTYPE(active)) {
2829 case IFM_10_T:
2830 sc->sc_ctrl |= CTRL_SPEED_10;
2831 break;
2832 case IFM_100_TX:
2833 sc->sc_ctrl |= CTRL_SPEED_100;
2834 break;
2835 case IFM_1000_T:
2836 sc->sc_ctrl |= CTRL_SPEED_1000;
2837 break;
2838 default:
2839 /*
2840 * fiber?
					 * Should not enter here.
2842 */
2843 printf("unknown media (%x)\n",
2844 active);
2845 break;
2846 }
2847 if (active & IFM_FDX)
2848 sc->sc_ctrl |= CTRL_FD;
2849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2850 }
2851 }
2852 } else if (icr & ICR_RXSEQ) {
2853 DPRINTF(WM_DEBUG_LINK,
2854 ("%s: LINK Receive sequence error\n",
2855 device_xname(sc->sc_dev)));
2856 }
2857 return;
2858 }
2859
2860 status = CSR_READ(sc, WMREG_STATUS);
2861 if (icr & ICR_LSC) {
2862 if (status & STATUS_LU) {
2863 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2864 device_xname(sc->sc_dev),
2865 (status & STATUS_FD) ? "FDX" : "HDX"));
2866 /*
2867 * NOTE: CTRL will update TFCE and RFCE automatically,
2868 * so we should update sc->sc_ctrl
2869 */
2870
2871 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2872 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2873 sc->sc_fcrtl &= ~FCRTL_XONE;
2874 if (status & STATUS_FD)
2875 sc->sc_tctl |=
2876 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2877 else
2878 sc->sc_tctl |=
2879 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2880 if (sc->sc_ctrl & CTRL_TFCE)
2881 sc->sc_fcrtl |= FCRTL_XONE;
2882 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2883 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2884 WMREG_OLD_FCRTL : WMREG_FCRTL,
2885 sc->sc_fcrtl);
2886 sc->sc_tbi_linkup = 1;
2887 } else {
2888 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2889 device_xname(sc->sc_dev)));
2890 sc->sc_tbi_linkup = 0;
2891 }
2892 wm_tbi_set_linkled(sc);
2893 } else if (icr & ICR_RXCFG) {
2894 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2895 device_xname(sc->sc_dev)));
2896 sc->sc_tbi_nrxcfg++;
2897 wm_check_for_link(sc);
2898 } else if (icr & ICR_RXSEQ) {
2899 DPRINTF(WM_DEBUG_LINK,
2900 ("%s: LINK: Receive sequence error\n",
2901 device_xname(sc->sc_dev)));
2902 }
2903 }
2904
2905 /*
2906 * wm_tick:
2907 *
2908 * One second timer, used to check link status, sweep up
2909 * completed transmit jobs, etc.
2910 */
2911 static void
2912 wm_tick(void *arg)
2913 {
2914 struct wm_softc *sc = arg;
2915 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2916 int s;
2917
2918 s = splnet();
2919
2920 if (sc->sc_type >= WM_T_82542_2_1) {
2921 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2922 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2923 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2924 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2925 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2926 }
2927
2928 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);

2932 if (sc->sc_flags & WM_F_HAS_MII)
2933 mii_tick(&sc->sc_mii);
2934 else
2935 wm_tbi_check_link(sc);
2936
2937 splx(s);
2938
2939 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2940 }
2941
2942 /*
2943 * wm_reset:
2944 *
2945 * Reset the i82542 chip.
2946 */
2947 static void
2948 wm_reset(struct wm_softc *sc)
2949 {
2950 uint32_t reg;
2951
2952 /*
2953 * Allocate on-chip memory according to the MTU size.
2954 * The Packet Buffer Allocation register must be written
2955 * before the chip is reset.
2956 */
2957 switch (sc->sc_type) {
2958 case WM_T_82547:
2959 case WM_T_82547_2:
2960 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2961 PBA_22K : PBA_30K;
2962 sc->sc_txfifo_head = 0;
2963 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2964 sc->sc_txfifo_size =
2965 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2966 sc->sc_txfifo_stall = 0;
2967 break;
2968 case WM_T_82571:
2969 case WM_T_82572:
2970 case WM_T_80003:
2971 sc->sc_pba = PBA_32K;
2972 break;
2973 case WM_T_82573:
2974 case WM_T_82574:
2975 sc->sc_pba = PBA_12K;
2976 break;
2977 case WM_T_ICH8:
2978 sc->sc_pba = PBA_8K;
2979 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2980 break;
2981 case WM_T_ICH9:
2982 case WM_T_ICH10:
2983 sc->sc_pba = PBA_10K;
2984 break;
2985 default:
2986 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2987 PBA_40K : PBA_48K;
2988 break;
2989 }
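	/*
	 * E.g. an 82547 with a standard MTU uses PBA_30K: assuming 1KB
	 * PBA units, roughly 30KB of packet buffer serve receive and
	 * the remaining 40K - 30K = 10KB become the Tx FIFO that
	 * wm_82547_txfifo_stall() manages.
	 */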
2990 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2991
2992 if (sc->sc_flags & WM_F_PCIE) {
2993 int timeout = 800;
2994
2995 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2996 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2997
		while (timeout--) {
2999 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
3000 break;
3001 delay(100);
3002 }
3003 }
3004
3005 /* clear interrupt */
3006 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3007
3008 /*
3009 * 82541 Errata 29? & 82547 Errata 28?
3010 * See also the description about PHY_RST bit in CTRL register
3011 * in 8254x_GBe_SDM.pdf.
3012 */
3013 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3014 CSR_WRITE(sc, WMREG_CTRL,
3015 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3016 delay(5000);
3017 }
3018
3019 switch (sc->sc_type) {
3020 case WM_T_82544:
3021 case WM_T_82540:
3022 case WM_T_82545:
3023 case WM_T_82546:
3024 case WM_T_82541:
3025 case WM_T_82541_2:
3026 /*
3027 * On some chipsets, a reset through a memory-mapped write
3028 * cycle can cause the chip to reset before completing the
		 * write cycle. This causes a major headache that can be
3030 * avoided by issuing the reset via indirect register writes
3031 * through I/O space.
3032 *
3033 * So, if we successfully mapped the I/O BAR at attach time,
3034 * use that. Otherwise, try our luck with a memory-mapped
3035 * reset.
3036 */
3037 if (sc->sc_flags & WM_F_IOH_VALID)
3038 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3039 else
3040 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3041 break;
3042
3043 case WM_T_82545_3:
3044 case WM_T_82546_3:
3045 /* Use the shadow control register on these chips. */
3046 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3047 break;
3048
3049 case WM_T_ICH8:
3050 case WM_T_ICH9:
3051 case WM_T_ICH10:
3052 wm_get_swfwhw_semaphore(sc);
3053 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
		delay(10000);
		break;
3055
3056 default:
3057 /* Everything else can safely use the documented method. */
3058 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3059 break;
3060 }
3061 delay(10000);
3062
3063 /* reload EEPROM */
	switch (sc->sc_type) {
3065 case WM_T_82542_2_0:
3066 case WM_T_82542_2_1:
3067 case WM_T_82543:
3068 case WM_T_82544:
3069 delay(10);
3070 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3071 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3072 delay(2000);
3073 break;
3074 case WM_T_82541:
3075 case WM_T_82541_2:
3076 case WM_T_82547:
3077 case WM_T_82547_2:
3078 delay(20000);
3079 break;
3080 case WM_T_82573:
3081 case WM_T_82574:
3082 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3083 delay(10);
3084 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3085 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3086 }
3087 /* FALLTHROUGH */
3088 default:
3089 /* check EECD_EE_AUTORD */
3090 wm_get_auto_rd_done(sc);
3091 }
3092
3093 /* reload sc_ctrl */
3094 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3095
3096 #if 0
3097 for (i = 0; i < 1000; i++) {
3098 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3099 return;
3100 }
3101 delay(20);
3102 }
3103
3104 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3105 log(LOG_ERR, "%s: reset failed to complete\n",
3106 device_xname(sc->sc_dev));
3107 #endif
3108 }
3109
3110 /*
3111 * wm_init: [ifnet interface function]
3112 *
3113 * Initialize the interface. Must be called at splnet().
3114 */
3115 static int
3116 wm_init(struct ifnet *ifp)
3117 {
3118 struct wm_softc *sc = ifp->if_softc;
3119 struct wm_rxsoft *rxs;
3120 int i, error = 0;
3121 uint32_t reg;
3122
3123 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
3127 * on such platforms. One possibility is that the DMA itself is
3128 * slightly more efficient if the front of the entire packet (instead
3129 * of the front of the headers) is aligned.
3130 *
3131 * Note we must always set align_tweak to 0 if we are using
3132 * jumbo frames.
3133 */
3134 #ifdef __NO_STRICT_ALIGNMENT
3135 sc->sc_align_tweak = 0;
3136 #else
3137 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3138 sc->sc_align_tweak = 0;
3139 else
3140 sc->sc_align_tweak = 2;
3141 #endif /* __NO_STRICT_ALIGNMENT */
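	/*
	 * E.g. with the default 1500-byte MTU and 2KB clusters,
	 * 1500 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1518 fits
	 * within MCLBYTES - 2 = 2046, so align_tweak is 2: shifting
	 * the 14-byte Ethernet header by 2 bytes leaves the IP header
	 * 4-byte aligned on strict-alignment platforms.
	 */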
3142
3143 /* Cancel any pending I/O. */
3144 wm_stop(ifp, 0);
3145
3146 /* update statistics before reset */
3147 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3148 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3149
3150 /* Reset the chip to a known state. */
3151 wm_reset(sc);
3152
3153 switch (sc->sc_type) {
3154 case WM_T_82571:
3155 case WM_T_82572:
3156 case WM_T_82573:
3157 case WM_T_82574:
3158 case WM_T_80003:
3159 case WM_T_ICH8:
3160 case WM_T_ICH9:
3161 case WM_T_ICH10:
3162 if (wm_check_mng_mode(sc) != 0)
3163 wm_get_hw_control(sc);
3164 break;
3165 default:
3166 break;
3167 }
3168
3169 /* Initialize the transmit descriptor ring. */
3170 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3171 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3172 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3173 sc->sc_txfree = WM_NTXDESC(sc);
3174 sc->sc_txnext = 0;
3175
3176 if (sc->sc_type < WM_T_82543) {
3177 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3178 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3179 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3180 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3181 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3182 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3183 } else {
3184 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3185 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3186 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3187 CSR_WRITE(sc, WMREG_TDH, 0);
3188 CSR_WRITE(sc, WMREG_TDT, 0);
3189 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3190 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3191
3192 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3193 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3194 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3195 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3196 }
3197 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3198 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3199
3200 /* Initialize the transmit job descriptors. */
3201 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3202 sc->sc_txsoft[i].txs_mbuf = NULL;
3203 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3204 sc->sc_txsnext = 0;
3205 sc->sc_txsdirty = 0;
3206
3207 /*
3208 * Initialize the receive descriptor and receive job
3209 * descriptor rings.
3210 */
3211 if (sc->sc_type < WM_T_82543) {
3212 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3213 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3214 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3215 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3216 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3217 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3218
3219 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3220 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3221 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3222 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3223 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3224 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3225 } else {
3226 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3227 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3228 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3229 CSR_WRITE(sc, WMREG_RDH, 0);
3230 CSR_WRITE(sc, WMREG_RDT, 0);
3231 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3232 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3233 }
3234 for (i = 0; i < WM_NRXDESC; i++) {
3235 rxs = &sc->sc_rxsoft[i];
3236 if (rxs->rxs_mbuf == NULL) {
3237 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3238 log(LOG_ERR, "%s: unable to allocate or map rx "
3239 "buffer %d, error = %d\n",
3240 device_xname(sc->sc_dev), i, error);
3241 /*
3242 * XXX Should attempt to run with fewer receive
3243 * XXX buffers instead of just failing.
3244 */
3245 wm_rxdrain(sc);
3246 goto out;
3247 }
3248 } else
3249 WM_INIT_RXDESC(sc, i);
3250 }
3251 sc->sc_rxptr = 0;
3252 sc->sc_rxdiscard = 0;
3253 WM_RXCHAIN_RESET(sc);
3254
3255 /*
3256 * Clear out the VLAN table -- we don't use it (yet).
3257 */
3258 CSR_WRITE(sc, WMREG_VET, 0);
3259 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3260 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3261
3262 /*
3263 * Set up flow-control parameters.
3264 *
3265 * XXX Values could probably stand some tuning.
3266 */
3267 if (sc->sc_type != WM_T_ICH8) {
3268 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3269 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3270 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3271 }
3272
3273 sc->sc_fcrtl = FCRTL_DFLT;
3274 if (sc->sc_type < WM_T_82543) {
3275 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3276 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3277 } else {
3278 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3279 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3280 }
3281 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3282
3283 /* Deal with VLAN enables. */
3284 if (VLAN_ATTACHED(&sc->sc_ethercom))
3285 sc->sc_ctrl |= CTRL_VME;
3286 else
3287 sc->sc_ctrl &= ~CTRL_VME;
3288
3289 /* Write the control registers. */
3290 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3291 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3292 int val;
3293 val = CSR_READ(sc, WMREG_CTRL_EXT);
3294 val &= ~CTRL_EXT_LINK_MODE_MASK;
3295 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3296
3297 /* Bypass RX and TX FIFO's */
3298 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3299 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3300 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3301
3302 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3303 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3304 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3305 /*
		 * Set the MAC to wait the maximum time between each
		 * iteration, and increase the max iterations when
		 * polling the PHY; this fixes erroneous timeouts at 10Mbps.
3309 */
3310 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3311 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3312 val |= 0x3F;
3313 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3314 }
3315 #if 0
3316 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3317 #endif
3318
3319 /*
3320 * Set up checksum offload parameters.
3321 */
3322 reg = CSR_READ(sc, WMREG_RXCSUM);
3323 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3324 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3325 reg |= RXCSUM_IPOFL;
3326 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3327 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3328 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3329 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3330 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3331
3332 /* Reset TBI's RXCFG count */
3333 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3334
3335 /*
3336 * Set up the interrupt registers.
3337 */
3338 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3339 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3340 ICR_RXO | ICR_RXT0;
3341 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3342 sc->sc_icr |= ICR_RXCFG;
3343 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3344
3345 /* Set up the inter-packet gap. */
3346 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3347
3348 if (sc->sc_type >= WM_T_82543) {
3349 /*
3350 * Set up the interrupt throttling register (units of 256ns)
3351 * Note that a footnote in Intel's documentation says this
3352 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3353 * or 10Mbit mode. Empirically, it appears to be the case
3354 * that that is also true for the 1024ns units of the other
3355 * interrupt-related timer registers -- so, really, we ought
3356 * to divide this value by 4 when the link speed is low.
3357 *
3358 * XXX implement this division at link speed change!
3359 */
3360
3361 /*
3362 * For N interrupts/sec, set this value to:
3363 * 1000000000 / (N * 256). Note that we set the
3364 * absolute and packet timer values to this value
3365 * divided by 4 to get "simple timer" behavior.
3366 */
3367
3368 sc->sc_itr = 1500; /* 2604 ints/sec */
3369 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
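		/*
		 * Cross-check of the formula above: with ITR = 1500,
		 * 10^9 / (2604 * 256) is roughly 1500, i.e. about 2604
		 * interrupts/sec.  To target, say, 4000 interrupts/sec,
		 * one would program roughly 10^9 / (4000 * 256) = 977
		 * (modulo the low-speed divide-by-4 caveat above).
		 */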
3370 }
3371
3372 /* Set the VLAN ethernetype. */
3373 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3374
3375 /*
3376 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
3378 * we resolve the media type.
3379 */
3380 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3381 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3382 if (sc->sc_type >= WM_T_82571)
3383 sc->sc_tctl |= TCTL_MULR;
3384 if (sc->sc_type >= WM_T_80003)
3385 sc->sc_tctl |= TCTL_RTLC;
3386 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3387
3388 /* Set the media. */
3389 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3390 goto out;
3391
3392 /*
3393 * Set up the receive control register; we actually program
3394 * the register when we set the receive filter. Use multicast
3395 * address offset type 0.
3396 *
3397 * Only the i82544 has the ability to strip the incoming
3398 * CRC, so we don't enable that feature.
3399 */
3400 sc->sc_mchash_type = 0;
3401 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3402 | RCTL_MO(sc->sc_mchash_type);
3403
	/* The 82573, 82574 and ICH8 don't support jumbo frames */
3405 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3406 sc->sc_type != WM_T_ICH8)
3407 sc->sc_rctl |= RCTL_LPE;
3408
3409 if (MCLBYTES == 2048) {
3410 sc->sc_rctl |= RCTL_2k;
3411 } else {
3412 if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
3414 case 4096:
3415 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3416 break;
3417 case 8192:
3418 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3419 break;
3420 case 16384:
3421 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3422 break;
3423 default:
3424 panic("wm_init: MCLBYTES %d unsupported",
3425 MCLBYTES);
3426 break;
3427 }
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
3429 }
3430
3431 /* Set the receive filter. */
3432 wm_set_filter(sc);
3433
3434 /* Start the one second link check clock. */
3435 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3436
3437 /* ...all done! */
3438 ifp->if_flags |= IFF_RUNNING;
3439 ifp->if_flags &= ~IFF_OACTIVE;
3440
3441 out:
3442 if (error)
3443 log(LOG_ERR, "%s: interface not running\n",
3444 device_xname(sc->sc_dev));
3445 return (error);
3446 }
3447
3448 /*
3449 * wm_rxdrain:
3450 *
3451 * Drain the receive queue.
3452 */
3453 static void
3454 wm_rxdrain(struct wm_softc *sc)
3455 {
3456 struct wm_rxsoft *rxs;
3457 int i;
3458
3459 for (i = 0; i < WM_NRXDESC; i++) {
3460 rxs = &sc->sc_rxsoft[i];
3461 if (rxs->rxs_mbuf != NULL) {
3462 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3463 m_freem(rxs->rxs_mbuf);
3464 rxs->rxs_mbuf = NULL;
3465 }
3466 }
3467 }
3468
3469 /*
3470 * wm_stop: [ifnet interface function]
3471 *
3472 * Stop transmission on the interface.
3473 */
3474 static void
3475 wm_stop(struct ifnet *ifp, int disable)
3476 {
3477 struct wm_softc *sc = ifp->if_softc;
3478 struct wm_txsoft *txs;
3479 int i;
3480
3481 /* Stop the one second clock. */
3482 callout_stop(&sc->sc_tick_ch);
3483
3484 /* Stop the 82547 Tx FIFO stall check timer. */
3485 if (sc->sc_type == WM_T_82547)
3486 callout_stop(&sc->sc_txfifo_ch);
3487
3488 if (sc->sc_flags & WM_F_HAS_MII) {
3489 /* Down the MII. */
3490 mii_down(&sc->sc_mii);
3491 } else {
3492 #if 0
3493 /* Should we clear PHY's status properly? */
3494 wm_reset(sc);
3495 #endif
3496 }
3497
3498 /* Stop the transmit and receive processes. */
3499 CSR_WRITE(sc, WMREG_TCTL, 0);
3500 CSR_WRITE(sc, WMREG_RCTL, 0);
3501
3502 /*
3503 * Clear the interrupt mask to ensure the device cannot assert its
3504 * interrupt line.
3505 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3506 * any currently pending or shared interrupt.
3507 */
3508 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3509 sc->sc_icr = 0;
3510
3511 /* Release any queued transmit buffers. */
3512 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3513 txs = &sc->sc_txsoft[i];
3514 if (txs->txs_mbuf != NULL) {
3515 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3516 m_freem(txs->txs_mbuf);
3517 txs->txs_mbuf = NULL;
3518 }
3519 }
3520
3521 /* Mark the interface as down and cancel the watchdog timer. */
3522 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3523 ifp->if_timer = 0;
3524
3525 if (disable)
3526 wm_rxdrain(sc);
3527 }
3528
3529 void
3530 wm_get_auto_rd_done(struct wm_softc *sc)
3531 {
3532 int i;
3533
3534 /* wait for eeprom to reload */
3535 switch (sc->sc_type) {
3536 case WM_T_82571:
3537 case WM_T_82572:
3538 case WM_T_82573:
3539 case WM_T_82574:
3540 case WM_T_80003:
3541 case WM_T_ICH8:
3542 case WM_T_ICH9:
3543 case WM_T_ICH10:
3544 for (i = 10; i > 0; i--) {
3545 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3546 break;
3547 delay(1000);
3548 }
3549 if (i == 0) {
3550 log(LOG_ERR, "%s: auto read from eeprom failed to "
3551 "complete\n", device_xname(sc->sc_dev));
3552 }
3553 break;
3554 default:
3555 delay(5000);
3556 break;
3557 }
3558
	/* PHY configuration starts after EECD_EE_AUTORD is set */
3560 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3561 delay(25000);
3562 }
3563
3564 /*
3565 * wm_acquire_eeprom:
3566 *
3567 * Perform the EEPROM handshake required on some chips.
3568 */
3569 static int
3570 wm_acquire_eeprom(struct wm_softc *sc)
3571 {
3572 uint32_t reg;
3573 int x;
3574 int ret = 0;
3575
	/* Flash-type EEPROM requires no handshake; always succeed. */
3577 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3578 return 0;
3579
3580 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3581 ret = wm_get_swfwhw_semaphore(sc);
3582 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3583 /* this will also do wm_get_swsm_semaphore() if needed */
3584 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3585 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3586 ret = wm_get_swsm_semaphore(sc);
3587 }
3588
3589 if (ret) {
3590 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3591 __func__);
3592 return 1;
3593 }
3594
3595 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3596 reg = CSR_READ(sc, WMREG_EECD);
3597
3598 /* Request EEPROM access. */
3599 reg |= EECD_EE_REQ;
3600 CSR_WRITE(sc, WMREG_EECD, reg);
3601
3602 /* ..and wait for it to be granted. */
3603 for (x = 0; x < 1000; x++) {
3604 reg = CSR_READ(sc, WMREG_EECD);
3605 if (reg & EECD_EE_GNT)
3606 break;
3607 delay(5);
3608 }
3609 if ((reg & EECD_EE_GNT) == 0) {
3610 aprint_error_dev(sc->sc_dev,
3611 "could not acquire EEPROM GNT\n");
3612 reg &= ~EECD_EE_REQ;
3613 CSR_WRITE(sc, WMREG_EECD, reg);
3614 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3615 wm_put_swfwhw_semaphore(sc);
3616 if (sc->sc_flags & WM_F_SWFW_SYNC)
3617 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3618 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3619 wm_put_swsm_semaphore(sc);
3620 return (1);
3621 }
3622 }
3623
3624 return (0);
3625 }
3626
3627 /*
3628 * wm_release_eeprom:
3629 *
3630 * Release the EEPROM mutex.
3631 */
3632 static void
3633 wm_release_eeprom(struct wm_softc *sc)
3634 {
3635 uint32_t reg;
3636
	/* Flash-type EEPROM requires no handshake; nothing to release. */
3638 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3639 return;
3640
3641 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3642 reg = CSR_READ(sc, WMREG_EECD);
3643 reg &= ~EECD_EE_REQ;
3644 CSR_WRITE(sc, WMREG_EECD, reg);
3645 }
3646
3647 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3648 wm_put_swfwhw_semaphore(sc);
3649 if (sc->sc_flags & WM_F_SWFW_SYNC)
3650 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3651 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3652 wm_put_swsm_semaphore(sc);
3653 }
3654
3655 /*
3656 * wm_eeprom_sendbits:
3657 *
3658 * Send a series of bits to the EEPROM.
3659 */
3660 static void
3661 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3662 {
3663 uint32_t reg;
3664 int x;
3665
3666 reg = CSR_READ(sc, WMREG_EECD);
3667
3668 for (x = nbits; x > 0; x--) {
3669 if (bits & (1U << (x - 1)))
3670 reg |= EECD_DI;
3671 else
3672 reg &= ~EECD_DI;
3673 CSR_WRITE(sc, WMREG_EECD, reg);
3674 delay(2);
3675 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3676 delay(2);
3677 CSR_WRITE(sc, WMREG_EECD, reg);
3678 delay(2);
3679 }
3680 }
3681
3682 /*
3683 * wm_eeprom_recvbits:
3684 *
3685 * Receive a series of bits from the EEPROM.
3686 */
3687 static void
3688 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3689 {
3690 uint32_t reg, val;
3691 int x;
3692
3693 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3694
3695 val = 0;
3696 for (x = nbits; x > 0; x--) {
3697 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3698 delay(2);
3699 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3700 val |= (1U << (x - 1));
3701 CSR_WRITE(sc, WMREG_EECD, reg);
3702 delay(2);
3703 }
3704 *valp = val;
3705 }
3706
3707 /*
3708 * wm_read_eeprom_uwire:
3709 *
3710 * Read a word from the EEPROM using the MicroWire protocol.
3711 */
3712 static int
3713 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3714 {
3715 uint32_t reg, val;
3716 int i;
3717
3718 for (i = 0; i < wordcnt; i++) {
3719 /* Clear SK and DI. */
3720 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3721 CSR_WRITE(sc, WMREG_EECD, reg);
3722
3723 /* Set CHIP SELECT. */
3724 reg |= EECD_CS;
3725 CSR_WRITE(sc, WMREG_EECD, reg);
3726 delay(2);
3727
3728 /* Shift in the READ command. */
3729 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3730
3731 /* Shift in address. */
3732 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3733
3734 /* Shift out the data. */
3735 wm_eeprom_recvbits(sc, &val, 16);
3736 data[i] = val & 0xffff;
3737
3738 /* Clear CHIP SELECT. */
3739 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3740 CSR_WRITE(sc, WMREG_EECD, reg);
3741 delay(2);
3742 }
3743
3744 return (0);
3745 }
3746
3747 /*
3748 * wm_spi_eeprom_ready:
3749 *
3750 * Wait for a SPI EEPROM to be ready for commands.
3751 */
3752 static int
3753 wm_spi_eeprom_ready(struct wm_softc *sc)
3754 {
3755 uint32_t val;
3756 int usec;
3757
3758 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3759 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3760 wm_eeprom_recvbits(sc, &val, 8);
3761 if ((val & SPI_SR_RDY) == 0)
3762 break;
3763 }
3764 if (usec >= SPI_MAX_RETRIES) {
3765 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3766 return (1);
3767 }
3768 return (0);
3769 }
3770
3771 /*
3772 * wm_read_eeprom_spi:
3773 *
3774  *	Read a word from the EEPROM using the SPI protocol.
3775 */
3776 static int
3777 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3778 {
3779 uint32_t reg, val;
3780 int i;
3781 uint8_t opc;
3782
3783 /* Clear SK and CS. */
3784 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3785 CSR_WRITE(sc, WMREG_EECD, reg);
3786 delay(2);
3787
3788 if (wm_spi_eeprom_ready(sc))
3789 return (1);
3790
3791 /* Toggle CS to flush commands. */
3792 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3793 delay(2);
3794 CSR_WRITE(sc, WMREG_EECD, reg);
3795 delay(2);
3796
3797 opc = SPI_OPC_READ;
3798 if (sc->sc_ee_addrbits == 8 && word >= 128)
3799 opc |= SPI_OPC_A8;
3800
3801 wm_eeprom_sendbits(sc, opc, 8);
3802 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3803
3804 for (i = 0; i < wordcnt; i++) {
3805 wm_eeprom_recvbits(sc, &val, 16);
3806 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3807 }
3808
3809 /* Raise CS and clear SK. */
3810 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3811 CSR_WRITE(sc, WMREG_EECD, reg);
3812 delay(2);
3813
3814 return (0);
3815 }
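
/*
 * Illustrative sketch (not compiled): how the SPI opcode and address are
 * formed for small parts.  Devices with 8 address bits hold 256 bytes
 * (128 words); words at byte addresses >= 256 are reached by folding
 * address bit 8 into the opcode (SPI_OPC_A8), and the word offset is
 * shifted left once because the part is byte-addressed.
 */
#if 0
	uint8_t opc = SPI_OPC_READ;
	int word = 130;				/* example word offset */

	if (sc->sc_ee_addrbits == 8 && word >= 128)
		opc |= SPI_OPC_A8;	/* address bit 8 rides in the opcode */
	/* Byte address sent on the wire: (130 << 1) & 0xff == 4. */
#endif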
3816
3817 #define EEPROM_CHECKSUM 0xBABA
3818 #define EEPROM_SIZE 0x0040
3819
3820 /*
3821 * wm_validate_eeprom_checksum
3822 *
3823 * The checksum is defined as the sum of the first 64 (16 bit) words.
3824 */
3825 static int
3826 wm_validate_eeprom_checksum(struct wm_softc *sc)
3827 {
3828 uint16_t checksum;
3829 uint16_t eeprom_data;
3830 int i;
3831
3832 checksum = 0;
3833
3834 for (i = 0; i < EEPROM_SIZE; i++) {
3835 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3836 return 1;
3837 checksum += eeprom_data;
3838 }
3839
3840 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3841 return 1;
3842
3843 return 0;
3844 }
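
/*
 * Illustrative sketch (not compiled): given the first 63 words of an
 * EEPROM image, the vendor tools pick the final word so that the 16-bit
 * sum of all 64 words equals EEPROM_CHECKSUM (0xBABA).  The helper name
 * below is hypothetical.
 */
#if 0
static uint16_t
eeprom_checksum_word(const uint16_t image[EEPROM_SIZE])
{
	uint16_t sum = 0;
	int i;

	/* Sum the first 63 words; the 64th is the checksum itself. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += image[i];
	return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif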
3845
3846 /*
3847 * wm_read_eeprom:
3848 *
3849 * Read data from the serial EEPROM.
3850 */
3851 static int
3852 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3853 {
3854 int rv;
3855
3856 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3857 return 1;
3858
3859 if (wm_acquire_eeprom(sc))
3860 return 1;
3861
3862 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3863 || (sc->sc_type == WM_T_ICH10))
3864 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3865 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3866 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3867 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3868 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3869 else
3870 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3871
3872 wm_release_eeprom(sc);
3873 return rv;
3874 }
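
/*
 * Illustrative sketch (not compiled): a typical caller reads the station
 * address as three little-endian words through wm_read_eeprom() and
 * unpacks them a byte at a time.  EEPROM_OFF_MACADDR is assumed here to
 * be the word offset of the MAC address.
 */
#if 0
static int
wm_read_macaddr_example(struct wm_softc *sc, uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	int i;

	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR, 3, myea))
		return 1;
	for (i = 0; i < 3; i++) {
		/* Each EEPROM word holds two address octets, low byte first. */
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}
	return 0;
}
#endif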
3875
3876 static int
3877 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3878 uint16_t *data)
3879 {
3880 int i, eerd = 0;
3881 int error = 0;
3882
3883 for (i = 0; i < wordcnt; i++) {
3884 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3885
3886 CSR_WRITE(sc, WMREG_EERD, eerd);
3887 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3888 if (error != 0)
3889 break;
3890
3891 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3892 }
3893
3894 return error;
3895 }
3896
3897 static int
3898 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3899 {
3900 uint32_t attempts = 100000;
3901 uint32_t i, reg = 0;
3902 int32_t done = -1;
3903
3904 for (i = 0; i < attempts; i++) {
3905 reg = CSR_READ(sc, rw);
3906
3907 if (reg & EERD_DONE) {
3908 done = 0;
3909 break;
3910 }
3911 delay(5);
3912 }
3913
3914 return done;
3915 }
3916
3917 /*
3918 * wm_add_rxbuf:
3919 *
3920  *	Add a receive buffer to the indicated descriptor.
3921 */
3922 static int
3923 wm_add_rxbuf(struct wm_softc *sc, int idx)
3924 {
3925 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3926 struct mbuf *m;
3927 int error;
3928
3929 MGETHDR(m, M_DONTWAIT, MT_DATA);
3930 if (m == NULL)
3931 return (ENOBUFS);
3932
3933 MCLGET(m, M_DONTWAIT);
3934 if ((m->m_flags & M_EXT) == 0) {
3935 m_freem(m);
3936 return (ENOBUFS);
3937 }
3938
3939 if (rxs->rxs_mbuf != NULL)
3940 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3941
3942 rxs->rxs_mbuf = m;
3943
3944 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3945 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3946 BUS_DMA_READ|BUS_DMA_NOWAIT);
3947 if (error) {
3948 /* XXX XXX XXX */
3949 aprint_error_dev(sc->sc_dev,
3950 "unable to load rx DMA map %d, error = %d\n",
3951 idx, error);
3952 panic("wm_add_rxbuf");
3953 }
3954
3955 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3956 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3957
3958 WM_INIT_RXDESC(sc, idx);
3959
3960 return (0);
3961 }
3962
3963 /*
3964 * wm_set_ral:
3965 *
3966  *	Set an entry in the receive address list.
3967 */
3968 static void
3969 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3970 {
3971 uint32_t ral_lo, ral_hi;
3972
3973 if (enaddr != NULL) {
3974 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3975 (enaddr[3] << 24);
3976 ral_hi = enaddr[4] | (enaddr[5] << 8);
3977 ral_hi |= RAL_AV;
3978 } else {
3979 ral_lo = 0;
3980 ral_hi = 0;
3981 }
3982
3983 if (sc->sc_type >= WM_T_82544) {
3984 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3985 ral_lo);
3986 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3987 ral_hi);
3988 } else {
3989 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3990 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3991 }
3992 }
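
/*
 * Illustrative sketch (not compiled): RAL packing for the example address
 * 00:11:22:33:44:55.  The first four octets land in RAL_LO, lowest octet
 * in the least significant byte; the last two go in RAL_HI along with the
 * Address Valid bit.
 */
#if 0
	static const uint8_t ea[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo, hi;

	lo = ea[0] | (ea[1] << 8) | (ea[2] << 16) | (ea[3] << 24);
	hi = (ea[4] | (ea[5] << 8)) | RAL_AV;
	/* lo == 0x33221100, hi == 0x5544 | RAL_AV */
#endif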
3993
3994 /*
3995 * wm_mchash:
3996 *
3997 * Compute the hash of the multicast address for the 4096-bit
3998 * multicast filter.
3999 */
4000 static uint32_t
4001 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
4002 {
4003 static const int lo_shift[4] = { 4, 3, 2, 0 };
4004 static const int hi_shift[4] = { 4, 5, 6, 8 };
4005 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
4006 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
4007 uint32_t hash;
4008
4009 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4010 || (sc->sc_type == WM_T_ICH10)) {
4011 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
4012 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
4013 return (hash & 0x3ff);
4014 }
4015 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4016 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4017
4018 return (hash & 0xfff);
4019 }
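
/*
 * Illustrative sketch (not compiled): how a hash value selects a filter
 * bit.  The 4096-bit table is 128 32-bit MTA registers, so the upper hash
 * bits pick the register and the low five bits pick the bit within it,
 * exactly as wm_set_filter() below does.
 */
#if 0
	uint32_t hash = 0xabc;			/* example 12-bit hash */
	uint32_t reg = (hash >> 5) & 0x7f;	/* MTA register index: 0x55 */
	uint32_t bit = hash & 0x1f;		/* bit within register: 0x1c */
#endif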
4020
4021 /*
4022 * wm_set_filter:
4023 *
4024 * Set up the receive filter.
4025 */
4026 static void
4027 wm_set_filter(struct wm_softc *sc)
4028 {
4029 struct ethercom *ec = &sc->sc_ethercom;
4030 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4031 struct ether_multi *enm;
4032 struct ether_multistep step;
4033 bus_addr_t mta_reg;
4034 uint32_t hash, reg, bit;
4035 int i, size;
4036
4037 if (sc->sc_type >= WM_T_82544)
4038 mta_reg = WMREG_CORDOVA_MTA;
4039 else
4040 mta_reg = WMREG_MTA;
4041
4042 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4043
4044 if (ifp->if_flags & IFF_BROADCAST)
4045 sc->sc_rctl |= RCTL_BAM;
4046 if (ifp->if_flags & IFF_PROMISC) {
4047 sc->sc_rctl |= RCTL_UPE;
4048 goto allmulti;
4049 }
4050
4051 /*
4052 * Set the station address in the first RAL slot, and
4053 * clear the remaining slots.
4054 */
4055 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4056 || (sc->sc_type == WM_T_ICH10))
4057 size = WM_ICH8_RAL_TABSIZE;
4058 else
4059 size = WM_RAL_TABSIZE;
4060 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4061 for (i = 1; i < size; i++)
4062 wm_set_ral(sc, NULL, i);
4063
4064 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4065 || (sc->sc_type == WM_T_ICH10))
4066 size = WM_ICH8_MC_TABSIZE;
4067 else
4068 size = WM_MC_TABSIZE;
4069 /* Clear out the multicast table. */
4070 for (i = 0; i < size; i++)
4071 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4072
4073 ETHER_FIRST_MULTI(step, ec, enm);
4074 while (enm != NULL) {
4075 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4076 /*
4077 * We must listen to a range of multicast addresses.
4078 * For now, just accept all multicasts, rather than
4079 * trying to set only those filter bits needed to match
4080 * the range. (At this time, the only use of address
4081 * ranges is for IP multicast routing, for which the
4082 * range is big enough to require all bits set.)
4083 */
4084 goto allmulti;
4085 }
4086
4087 hash = wm_mchash(sc, enm->enm_addrlo);
4088
4089 reg = (hash >> 5);
4090 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4091 || (sc->sc_type == WM_T_ICH10))
4092 reg &= 0x1f;
4093 else
4094 reg &= 0x7f;
4095 bit = hash & 0x1f;
4096
4097 hash = CSR_READ(sc, mta_reg + (reg << 2));
4098 hash |= 1U << bit;
4099
4100 /* XXX Hardware bug?? */
4101 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4102 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4103 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4104 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4105 } else
4106 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4107
4108 ETHER_NEXT_MULTI(step, enm);
4109 }
4110
4111 ifp->if_flags &= ~IFF_ALLMULTI;
4112 goto setit;
4113
4114 allmulti:
4115 ifp->if_flags |= IFF_ALLMULTI;
4116 sc->sc_rctl |= RCTL_MPE;
4117
4118 setit:
4119 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4120 }
4121
4122 /*
4123 * wm_tbi_mediainit:
4124 *
4125 * Initialize media for use on 1000BASE-X devices.
4126 */
4127 static void
4128 wm_tbi_mediainit(struct wm_softc *sc)
4129 {
4130 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4131 const char *sep = "";
4132
4133 if (sc->sc_type < WM_T_82543)
4134 sc->sc_tipg = TIPG_WM_DFLT;
4135 else
4136 sc->sc_tipg = TIPG_LG_DFLT;
4137
4138 sc->sc_tbi_anegticks = 5;
4139
4140 /* Initialize our media structures */
4141 sc->sc_mii.mii_ifp = ifp;
4142
4143 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4144 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4145 wm_tbi_mediastatus);
4146
4147 /*
4148 * SWD Pins:
4149 *
4150 * 0 = Link LED (output)
4151 * 1 = Loss Of Signal (input)
4152 */
4153 sc->sc_ctrl |= CTRL_SWDPIO(0);
4154 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4155
4156 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4157
4158 #define ADD(ss, mm, dd) \
4159 do { \
4160 aprint_normal("%s%s", sep, ss); \
4161 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4162 sep = ", "; \
4163 } while (/*CONSTCOND*/0)
4164
4165 aprint_normal_dev(sc->sc_dev, "");
4166 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4167 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4168 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4169 aprint_normal("\n");
4170
4171 #undef ADD
4172
4173 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4174 }
4175
4176 /*
4177 * wm_tbi_mediastatus: [ifmedia interface function]
4178 *
4179 * Get the current interface media status on a 1000BASE-X device.
4180 */
4181 static void
4182 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4183 {
4184 struct wm_softc *sc = ifp->if_softc;
4185 uint32_t ctrl, status;
4186
4187 ifmr->ifm_status = IFM_AVALID;
4188 ifmr->ifm_active = IFM_ETHER;
4189
4190 status = CSR_READ(sc, WMREG_STATUS);
4191 if ((status & STATUS_LU) == 0) {
4192 ifmr->ifm_active |= IFM_NONE;
4193 return;
4194 }
4195
4196 ifmr->ifm_status |= IFM_ACTIVE;
4197 ifmr->ifm_active |= IFM_1000_SX;
4198 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4199 ifmr->ifm_active |= IFM_FDX;
4200 ctrl = CSR_READ(sc, WMREG_CTRL);
4201 if (ctrl & CTRL_RFCE)
4202 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4203 if (ctrl & CTRL_TFCE)
4204 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4205 }
4206
4207 /*
4208 * wm_tbi_mediachange: [ifmedia interface function]
4209 *
4210 * Set hardware to newly-selected media on a 1000BASE-X device.
4211 */
4212 static int
4213 wm_tbi_mediachange(struct ifnet *ifp)
4214 {
4215 struct wm_softc *sc = ifp->if_softc;
4216 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4217 uint32_t status;
4218 int i;
4219
4220 sc->sc_txcw = 0;
4221 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4222 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4223 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4224 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4225 sc->sc_txcw |= TXCW_ANE;
4226 } else {
4227 /*
4228 * If autonegotiation is turned off, force link up and turn on
4229 * full duplex
4230 */
4231 sc->sc_txcw &= ~TXCW_ANE;
4232 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4233 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4234 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4235 delay(1000);
4236 }
4237
4238 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4239 device_xname(sc->sc_dev),sc->sc_txcw));
4240 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4241 delay(10000);
4242
4243 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4244 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4245
4246 	/*
4247 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
4248 	 * if the optics detect a signal, 0 if they don't.
4249 	 */
4250 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4251 /* Have signal; wait for the link to come up. */
4252
4253 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4254 /*
4255 * Reset the link, and let autonegotiation do its thing
4256 */
4257 sc->sc_ctrl |= CTRL_LRST;
4258 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4259 delay(1000);
4260 sc->sc_ctrl &= ~CTRL_LRST;
4261 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4262 delay(1000);
4263 }
4264
4265 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4266 delay(10000);
4267 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4268 break;
4269 }
4270
4271 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4272 device_xname(sc->sc_dev),i));
4273
4274 status = CSR_READ(sc, WMREG_STATUS);
4275 DPRINTF(WM_DEBUG_LINK,
4276 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4277 device_xname(sc->sc_dev),status, STATUS_LU));
4278 if (status & STATUS_LU) {
4279 /* Link is up. */
4280 DPRINTF(WM_DEBUG_LINK,
4281 ("%s: LINK: set media -> link up %s\n",
4282 device_xname(sc->sc_dev),
4283 (status & STATUS_FD) ? "FDX" : "HDX"));
4284
4285 			/*
4286 			 * NOTE: the hardware updates TFCE and RFCE inside
4287 			 * CTRL automatically, so keep sc->sc_ctrl in sync.
4288 			 */
4289 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4290 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4291 sc->sc_fcrtl &= ~FCRTL_XONE;
4292 if (status & STATUS_FD)
4293 sc->sc_tctl |=
4294 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4295 else
4296 sc->sc_tctl |=
4297 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4298 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4299 sc->sc_fcrtl |= FCRTL_XONE;
4300 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4301 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4302 WMREG_OLD_FCRTL : WMREG_FCRTL,
4303 sc->sc_fcrtl);
4304 sc->sc_tbi_linkup = 1;
4305 } else {
4306 if (i == WM_LINKUP_TIMEOUT)
4307 wm_check_for_link(sc);
4308 /* Link is down. */
4309 DPRINTF(WM_DEBUG_LINK,
4310 ("%s: LINK: set media -> link down\n",
4311 device_xname(sc->sc_dev)));
4312 sc->sc_tbi_linkup = 0;
4313 }
4314 } else {
4315 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4316 device_xname(sc->sc_dev)));
4317 sc->sc_tbi_linkup = 0;
4318 }
4319
4320 wm_tbi_set_linkled(sc);
4321
4322 return (0);
4323 }
4324
4325 /*
4326 * wm_tbi_set_linkled:
4327 *
4328 * Update the link LED on 1000BASE-X devices.
4329 */
4330 static void
4331 wm_tbi_set_linkled(struct wm_softc *sc)
4332 {
4333
4334 if (sc->sc_tbi_linkup)
4335 sc->sc_ctrl |= CTRL_SWDPIN(0);
4336 else
4337 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4338
4339 /* 82540 or newer devices are active low */
4340 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4341
4342 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4343 }
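
/*
 * Illustrative sketch (not compiled): the XOR above folds the active-low
 * polarity into one step.  With link up on an active-low (>= 82540) part,
 * the bit is first set and then inverted, driving the pin low and
 * lighting the LED.
 */
#if 0
	uint32_t ctrl = 0;
	int linkup = 1, active_low = 1;

	if (linkup)
		ctrl |= CTRL_SWDPIN(0);			/* bit set */
	ctrl ^= active_low ? CTRL_SWDPIN(0) : 0;	/* now clear: pin low */
#endif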
4344
4345 /*
4346 * wm_tbi_check_link:
4347 *
4348 * Check the link on 1000BASE-X devices.
4349 */
4350 static void
4351 wm_tbi_check_link(struct wm_softc *sc)
4352 {
4353 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4354 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4355 uint32_t rxcw, ctrl, status;
4356
4357 status = CSR_READ(sc, WMREG_STATUS);
4358
4359 rxcw = CSR_READ(sc, WMREG_RXCW);
4360 ctrl = CSR_READ(sc, WMREG_CTRL);
4361
4362 /* set link status */
4363 if ((status & STATUS_LU) == 0) {
4364 DPRINTF(WM_DEBUG_LINK,
4365 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4366 sc->sc_tbi_linkup = 0;
4367 } else if (sc->sc_tbi_linkup == 0) {
4368 DPRINTF(WM_DEBUG_LINK,
4369 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4370 (status & STATUS_FD) ? "FDX" : "HDX"));
4371 sc->sc_tbi_linkup = 1;
4372 }
4373
4374 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4375 && ((status & STATUS_LU) == 0)) {
4376 sc->sc_tbi_linkup = 0;
4377 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4378 /* RXCFG storm! */
4379 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4380 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4381 wm_init(ifp);
4382 wm_start(ifp);
4383 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4384 /* If the timer expired, retry autonegotiation */
4385 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4386 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4387 sc->sc_tbi_ticks = 0;
4388 /*
4389 * Reset the link, and let autonegotiation do
4390 * its thing
4391 */
4392 sc->sc_ctrl |= CTRL_LRST;
4393 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4394 delay(1000);
4395 sc->sc_ctrl &= ~CTRL_LRST;
4396 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4397 delay(1000);
4398 CSR_WRITE(sc, WMREG_TXCW,
4399 sc->sc_txcw & ~TXCW_ANE);
4400 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4401 }
4402 }
4403 }
4404
4405 wm_tbi_set_linkled(sc);
4406 }
4407
4408 /*
4409 * wm_gmii_reset:
4410 *
4411 * Reset the PHY.
4412 */
4413 static void
4414 wm_gmii_reset(struct wm_softc *sc)
4415 {
4416 uint32_t reg;
4417 int func = 0; /* XXX gcc */
4418
4419 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4420 || (sc->sc_type == WM_T_ICH10)) {
4421 if (wm_get_swfwhw_semaphore(sc)) {
4422 aprint_error_dev(sc->sc_dev,
4423 "%s: failed to get semaphore\n", __func__);
4424 return;
4425 }
4426 }
4427 if (sc->sc_type == WM_T_80003) {
4428 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4429 if (wm_get_swfw_semaphore(sc,
4430 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4431 aprint_error_dev(sc->sc_dev,
4432 "%s: failed to get semaphore\n", __func__);
4433 return;
4434 }
4435 }
4436 if (sc->sc_type >= WM_T_82544) {
4437 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4438 delay(20000);
4439
4440 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4441 delay(20000);
4442 } else {
4443 /*
4444 * With 82543, we need to force speed and duplex on the MAC
4445 * equal to what the PHY speed and duplex configuration is.
4446 * In addition, we need to perform a hardware reset on the PHY
4447 * to take it out of reset.
4448 */
4449 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4450 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4451
4452 /* The PHY reset pin is active-low. */
4453 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4454 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4455 CTRL_EXT_SWDPIN(4));
4456 reg |= CTRL_EXT_SWDPIO(4);
4457
4458 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4459 delay(10);
4460
4461 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4462 delay(10000);
4463
4464 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4465 delay(10);
4466 #if 0
4467 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4468 #endif
4469 }
4470 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4471 || (sc->sc_type == WM_T_ICH10))
4472 wm_put_swfwhw_semaphore(sc);
4473 if (sc->sc_type == WM_T_80003)
4474 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4475 }
4476
4477 /*
4478 * wm_gmii_mediainit:
4479 *
4480 * Initialize media for use on 1000BASE-T devices.
4481 */
4482 static void
4483 wm_gmii_mediainit(struct wm_softc *sc)
4484 {
4485 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4486
4487 /* We have MII. */
4488 sc->sc_flags |= WM_F_HAS_MII;
4489
4490 if (sc->sc_type >= WM_T_80003)
4491 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4492 else
4493 sc->sc_tipg = TIPG_1000T_DFLT;
4494
4495 /*
4496 * Let the chip set speed/duplex on its own based on
4497 * signals from the PHY.
4498 * XXXbouyer - I'm not sure this is right for the 80003,
4499 * the em driver only sets CTRL_SLU here - but it seems to work.
4500 */
4501 sc->sc_ctrl |= CTRL_SLU;
4502 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4503
4504 /* Initialize our media structures and probe the GMII. */
4505 sc->sc_mii.mii_ifp = ifp;
4506
4507 if (sc->sc_type == WM_T_ICH10) {
4508 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4509 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4510 } else if (sc->sc_type >= WM_T_80003) {
4511 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4512 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4513 } else if (sc->sc_type >= WM_T_82544) {
4514 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4515 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4516 } else {
4517 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4518 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4519 }
4520 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4521
4522 wm_gmii_reset(sc);
4523
4524 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4525 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4526 wm_gmii_mediastatus);
4527
4528 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4529 MII_OFFSET_ANY, MIIF_DOPAUSE);
4530 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4531 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4532 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4533 } else
4534 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4535 }
4536
4537 /*
4538 * wm_gmii_mediastatus: [ifmedia interface function]
4539 *
4540 * Get the current interface media status on a 1000BASE-T device.
4541 */
4542 static void
4543 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4544 {
4545 struct wm_softc *sc = ifp->if_softc;
4546
4547 ether_mediastatus(ifp, ifmr);
4548 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4549 sc->sc_flowflags;
4550 }
4551
4552 /*
4553 * wm_gmii_mediachange: [ifmedia interface function]
4554 *
4555 * Set hardware to newly-selected media on a 1000BASE-T device.
4556 */
4557 static int
4558 wm_gmii_mediachange(struct ifnet *ifp)
4559 {
4560 struct wm_softc *sc = ifp->if_softc;
4561 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4562 int rc;
4563
4564 if ((ifp->if_flags & IFF_UP) == 0)
4565 return 0;
4566
4567 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4568 sc->sc_ctrl |= CTRL_SLU;
4569 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4570 || (sc->sc_type > WM_T_82543)) {
4571 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4572 } else {
4573 sc->sc_ctrl &= ~CTRL_ASDE;
4574 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4575 if (ife->ifm_media & IFM_FDX)
4576 sc->sc_ctrl |= CTRL_FD;
4577 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4578 case IFM_10_T:
4579 sc->sc_ctrl |= CTRL_SPEED_10;
4580 break;
4581 case IFM_100_TX:
4582 sc->sc_ctrl |= CTRL_SPEED_100;
4583 break;
4584 case IFM_1000_T:
4585 sc->sc_ctrl |= CTRL_SPEED_1000;
4586 break;
4587 default:
4588 panic("wm_gmii_mediachange: bad media 0x%x",
4589 ife->ifm_media);
4590 }
4591 }
4592 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4593 if (sc->sc_type <= WM_T_82543)
4594 wm_gmii_reset(sc);
4595
4596 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4597 return 0;
4598 return rc;
4599 }
4600
4601 #define MDI_IO CTRL_SWDPIN(2)
4602 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4603 #define MDI_CLK CTRL_SWDPIN(3)
4604
4605 static void
4606 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4607 {
4608 uint32_t i, v;
4609
4610 v = CSR_READ(sc, WMREG_CTRL);
4611 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4612 v |= MDI_DIR | CTRL_SWDPIO(3);
4613
4614 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4615 if (data & i)
4616 v |= MDI_IO;
4617 else
4618 v &= ~MDI_IO;
4619 CSR_WRITE(sc, WMREG_CTRL, v);
4620 delay(10);
4621 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4622 delay(10);
4623 CSR_WRITE(sc, WMREG_CTRL, v);
4624 delay(10);
4625 }
4626 }
4627
4628 static uint32_t
4629 i82543_mii_recvbits(struct wm_softc *sc)
4630 {
4631 uint32_t v, i, data = 0;
4632
4633 v = CSR_READ(sc, WMREG_CTRL);
4634 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4635 v |= CTRL_SWDPIO(3);
4636
4637 CSR_WRITE(sc, WMREG_CTRL, v);
4638 delay(10);
4639 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4640 delay(10);
4641 CSR_WRITE(sc, WMREG_CTRL, v);
4642 delay(10);
4643
4644 for (i = 0; i < 16; i++) {
4645 data <<= 1;
4646 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4647 delay(10);
4648 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4649 data |= 1;
4650 CSR_WRITE(sc, WMREG_CTRL, v);
4651 delay(10);
4652 }
4653
4654 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4655 delay(10);
4656 CSR_WRITE(sc, WMREG_CTRL, v);
4657 delay(10);
4658
4659 return (data);
4660 }
4661
4662 #undef MDI_IO
4663 #undef MDI_DIR
4664 #undef MDI_CLK
4665
4666 /*
4667 * wm_gmii_i82543_readreg: [mii interface function]
4668 *
4669 * Read a PHY register on the GMII (i82543 version).
4670 */
4671 static int
4672 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4673 {
4674 struct wm_softc *sc = device_private(self);
4675 int rv;
4676
4677 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4678 i82543_mii_sendbits(sc, reg | (phy << 5) |
4679 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4680 rv = i82543_mii_recvbits(sc) & 0xffff;
4681
4682 DPRINTF(WM_DEBUG_GMII,
4683 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4684 device_xname(sc->sc_dev), phy, reg, rv));
4685
4686 return (rv);
4687 }
4688
4689 /*
4690 * wm_gmii_i82543_writereg: [mii interface function]
4691 *
4692 * Write a PHY register on the GMII (i82543 version).
4693 */
4694 static void
4695 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4696 {
4697 struct wm_softc *sc = device_private(self);
4698
4699 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4700 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4701 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4702 (MII_COMMAND_START << 30), 32);
4703 }
4704
4705 /*
4706 * wm_gmii_i82544_readreg: [mii interface function]
4707 *
4708 * Read a PHY register on the GMII.
4709 */
4710 static int
4711 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4712 {
4713 struct wm_softc *sc = device_private(self);
4714 uint32_t mdic = 0;
4715 int i, rv;
4716
4717 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4718 MDIC_REGADD(reg));
4719
4720 for (i = 0; i < 320; i++) {
4721 mdic = CSR_READ(sc, WMREG_MDIC);
4722 if (mdic & MDIC_READY)
4723 break;
4724 delay(10);
4725 }
4726
4727 if ((mdic & MDIC_READY) == 0) {
4728 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4729 device_xname(sc->sc_dev), phy, reg);
4730 rv = 0;
4731 } else if (mdic & MDIC_E) {
4732 #if 0 /* This is normal if no PHY is present. */
4733 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4734 device_xname(sc->sc_dev), phy, reg);
4735 #endif
4736 rv = 0;
4737 } else {
4738 rv = MDIC_DATA(mdic);
4739 if (rv == 0xffff)
4740 rv = 0;
4741 }
4742
4743 return (rv);
4744 }
4745
4746 /*
4747 * wm_gmii_i82544_writereg: [mii interface function]
4748 *
4749 * Write a PHY register on the GMII.
4750 */
4751 static void
4752 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4753 {
4754 struct wm_softc *sc = device_private(self);
4755 uint32_t mdic = 0;
4756 int i;
4757
4758 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4759 MDIC_REGADD(reg) | MDIC_DATA(val));
4760
4761 for (i = 0; i < 320; i++) {
4762 mdic = CSR_READ(sc, WMREG_MDIC);
4763 if (mdic & MDIC_READY)
4764 break;
4765 delay(10);
4766 }
4767
4768 if ((mdic & MDIC_READY) == 0)
4769 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4770 device_xname(sc->sc_dev), phy, reg);
4771 else if (mdic & MDIC_E)
4772 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4773 device_xname(sc->sc_dev), phy, reg);
4774 }
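
/*
 * Illustrative sketch (not compiled): once the accessors are wired into
 * sc_mii, the MII layer or the driver can poll any PHY register through
 * them; here the standard BMSR link bit, using PHY address 1 purely as
 * an example.  MII_BMSR and BMSR_LINK come from dev/mii/mii.h.
 */
#if 0
	int bmsr;

	/* BMSR latches link-down, so read it twice for the current state. */
	(void)sc->sc_mii.mii_readreg(sc->sc_dev, 1, MII_BMSR);
	bmsr = sc->sc_mii.mii_readreg(sc->sc_dev, 1, MII_BMSR);
	if (bmsr & BMSR_LINK) {
		/* link is up */
	}
#endif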
4775
4776 /*
4777 * wm_gmii_i80003_readreg: [mii interface function]
4778 *
4779  * Read a PHY register on the kumeran bus.
4780  * This could be handled by the PHY layer if we didn't have to lock the
4781  * resource ...
4782 */
4783 static int
4784 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4785 {
4786 struct wm_softc *sc = device_private(self);
4787 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4788 int rv;
4789
4790 if (phy != 1) /* only one PHY on kumeran bus */
4791 return 0;
4792
4793 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4794 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4795 __func__);
4796 return 0;
4797 }
4798
4799 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4800 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4801 reg >> GG82563_PAGE_SHIFT);
4802 } else {
4803 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4804 reg >> GG82563_PAGE_SHIFT);
4805 }
4806 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
4807 delay(200);
4808 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4809 delay(200);
4810
4811 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4812 return (rv);
4813 }
4814
4815 /*
4816 * wm_gmii_i80003_writereg: [mii interface function]
4817 *
4818  * Write a PHY register on the kumeran bus.
4819  * This could be handled by the PHY layer if we didn't have to lock the
4820  * resource ...
4821 */
4822 static void
4823 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4824 {
4825 struct wm_softc *sc = device_private(self);
4826 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4827
4828 if (phy != 1) /* only one PHY on kumeran bus */
4829 return;
4830
4831 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4832 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4833 __func__);
4834 return;
4835 }
4836
4837 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4838 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4839 reg >> GG82563_PAGE_SHIFT);
4840 } else {
4841 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4842 reg >> GG82563_PAGE_SHIFT);
4843 }
4844 	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
4845 delay(200);
4846 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4847 delay(200);
4848
4849 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4850 }
4851
4852 /*
4853 * wm_gmii_bm_readreg: [mii interface function]
4854 *
4855  * Read a PHY register on the BM PHY (ICH10).
4856  * This could be handled by the PHY layer if we didn't have to lock the
4857  * resource ...
4858 */
4859 static int
4860 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4861 {
4862 struct wm_softc *sc = device_private(self);
4863 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4864 int rv;
4865
4866 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4867 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4868 __func__);
4869 return 0;
4870 }
4871
4872 	if (reg > GG82563_MAX_REG_ADDRESS) {
4873 		if (phy == 1)
4874 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
4875 		else
4876 			wm_gmii_i82544_writereg(self, phy,
4877 			    GG82563_PHY_PAGE_SELECT,
4878 			    reg >> GG82563_PAGE_SHIFT);
4879
4880 }
4881
4882 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4883 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4884 return (rv);
4885 }
4886
4887 /*
4888 * wm_gmii_bm_writereg: [mii interface function]
4889 *
4890  * Write a PHY register on the BM PHY (ICH10).
4891  * This could be handled by the PHY layer if we didn't have to lock the
4892  * resource ...
4893 */
4894 static void
4895 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4896 {
4897 struct wm_softc *sc = device_private(self);
4898 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4899
4900 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4901 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4902 __func__);
4903 return;
4904 }
4905
4906 	if (reg > GG82563_MAX_REG_ADDRESS) {
4907 		if (phy == 1)
4908 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
4909 		else
4910 			wm_gmii_i82544_writereg(self, phy,
4911 			    GG82563_PHY_PAGE_SELECT,
4912 			    reg >> GG82563_PAGE_SHIFT);
4913
4914 }
4915
4916 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4917 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4918 }
4919
4920 /*
4921 * wm_gmii_statchg: [mii interface function]
4922 *
4923 * Callback from MII layer when media changes.
4924 */
4925 static void
4926 wm_gmii_statchg(device_t self)
4927 {
4928 struct wm_softc *sc = device_private(self);
4929 struct mii_data *mii = &sc->sc_mii;
4930
4931 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4932 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4933 sc->sc_fcrtl &= ~FCRTL_XONE;
4934
4935 /*
4936 * Get flow control negotiation result.
4937 */
4938 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4939 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4940 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4941 mii->mii_media_active &= ~IFM_ETH_FMASK;
4942 }
4943
4944 if (sc->sc_flowflags & IFM_FLOW) {
4945 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4946 sc->sc_ctrl |= CTRL_TFCE;
4947 sc->sc_fcrtl |= FCRTL_XONE;
4948 }
4949 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4950 sc->sc_ctrl |= CTRL_RFCE;
4951 }
4952
4953 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4954 DPRINTF(WM_DEBUG_LINK,
4955 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
4956 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4957 } else {
4958 DPRINTF(WM_DEBUG_LINK,
4959 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
4960 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4961 }
4962
4963 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4964 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4965 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4966 : WMREG_FCRTL, sc->sc_fcrtl);
4967 if (sc->sc_type >= WM_T_80003) {
4968 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4969 case IFM_1000_T:
4970 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4971 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4972 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4973 break;
4974 default:
4975 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4976 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4977 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4978 break;
4979 }
4980 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4981 }
4982 }
4983
4984 /*
4985 * wm_kmrn_i80003_readreg:
4986 *
4987 * Read a kumeran register
4988 */
4989 static int
4990 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4991 {
4992 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4993 int rv;
4994
4995 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4996 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4997 __func__);
4998 return 0;
4999 }
5000
5001 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5002 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5003 KUMCTRLSTA_REN);
5004 delay(2);
5005
5006 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
5007 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5008 return (rv);
5009 }
5010
5011 /*
5012 * wm_kmrn_i80003_writereg:
5013 *
5014 * Write a kumeran register
5015 */
5016 static void
5017 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
5018 {
5019 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5020
5021 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5022 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5023 __func__);
5024 return;
5025 }
5026
5027 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5028 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5029 (val & KUMCTRLSTA_MASK));
5030 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5031 }
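
/*
 * Illustrative sketch (not compiled): a read-modify-write of a kumeran
 * register using the two helpers above.  The semaphore handling happens
 * inside the helpers; the bit value below is a made-up example, not a
 * documented constant.
 */
#if 0
	int val;

	val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL);
	val |= 0x0010;		/* hypothetical example bit */
	wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, val);
#endif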
5032
5033 static int
5034 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5035 {
5036 uint32_t eecd = 0;
5037
5038 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5039 eecd = CSR_READ(sc, WMREG_EECD);
5040
5041 /* Isolate bits 15 & 16 */
5042 eecd = ((eecd >> 15) & 0x03);
5043
5044 /* If both bits are set, device is Flash type */
5045 if (eecd == 0x03) {
5046 return 0;
5047 }
5048 }
5049 return 1;
5050 }
5051
5052 static int
5053 wm_get_swsm_semaphore(struct wm_softc *sc)
5054 {
5055 int32_t timeout;
5056 uint32_t swsm;
5057
5058 /* Get the FW semaphore. */
5059 timeout = 1000 + 1; /* XXX */
5060 while (timeout) {
5061 swsm = CSR_READ(sc, WMREG_SWSM);
5062 swsm |= SWSM_SWESMBI;
5063 CSR_WRITE(sc, WMREG_SWSM, swsm);
5064 		/* If we managed to set the bit, we got the semaphore. */
5065 swsm = CSR_READ(sc, WMREG_SWSM);
5066 if (swsm & SWSM_SWESMBI)
5067 break;
5068
5069 delay(50);
5070 timeout--;
5071 }
5072
5073 if (timeout == 0) {
5074 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
5075 /* Release semaphores */
5076 wm_put_swsm_semaphore(sc);
5077 return 1;
5078 }
5079 return 0;
5080 }
5081
5082 static void
5083 wm_put_swsm_semaphore(struct wm_softc *sc)
5084 {
5085 uint32_t swsm;
5086
5087 swsm = CSR_READ(sc, WMREG_SWSM);
5088 swsm &= ~(SWSM_SWESMBI);
5089 CSR_WRITE(sc, WMREG_SWSM, swsm);
5090 }
5091
5092 static int
5093 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5094 {
5095 uint32_t swfw_sync;
5096 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5097 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5098 	int timeout;
5099
5100 	for (timeout = 0; timeout < 200; timeout++) {
5101 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5102 if (wm_get_swsm_semaphore(sc)) {
5103 aprint_error_dev(sc->sc_dev,
5104 "%s: failed to get semaphore\n",
5105 __func__);
5106 return 1;
5107 }
5108 }
5109 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5110 if ((swfw_sync & (swmask | fwmask)) == 0) {
5111 swfw_sync |= swmask;
5112 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5113 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5114 wm_put_swsm_semaphore(sc);
5115 return 0;
5116 }
5117 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5118 wm_put_swsm_semaphore(sc);
5119 delay(5000);
5120 }
5121 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5122 device_xname(sc->sc_dev), mask, swfw_sync);
5123 return 1;
5124 }
5125
5126 static void
5127 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5128 {
5129 uint32_t swfw_sync;
5130
5131 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5132 while (wm_get_swsm_semaphore(sc) != 0)
5133 continue;
5134 }
5135 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5136 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5137 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5138 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5139 wm_put_swsm_semaphore(sc);
5140 }
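
/*
 * Illustrative sketch (not compiled): the expected usage pattern for the
 * software/firmware semaphore.  Every register sequence that touches a
 * shared resource brackets the access with get/put on the matching mask
 * and bails out if the firmware holds it.
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_EEP_SM))
		return 1;	/* firmware (or the other port) owns it */
	/* ... access the shared EEPROM registers here ... */
	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
#endif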
5141
5142 static int
5143 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5144 {
5145 uint32_t ext_ctrl;
5146 	int timeout;
5147
5148 	for (timeout = 0; timeout < 200; timeout++) {
5149 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5150 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5151 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5152
5153 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5154 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5155 return 0;
5156 delay(5000);
5157 }
5158 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5159 device_xname(sc->sc_dev), ext_ctrl);
5160 return 1;
5161 }
5162
5163 static void
5164 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5165 {
5166 uint32_t ext_ctrl;
5167 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5168 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5169 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5170 }
5171
5172 static int
5173 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5174 {
5175 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5176 uint8_t bank_high_byte;
5177 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5178
5179 if (sc->sc_type != WM_T_ICH10) {
5180 /* Value of bit 22 corresponds to the flash bank we're on. */
5181 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5182 } else {
5183 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5184 if ((bank_high_byte & 0xc0) == 0x80)
5185 *bank = 0;
5186 else {
5187 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5188 &bank_high_byte);
5189 if ((bank_high_byte & 0xc0) == 0x80)
5190 *bank = 1;
5191 else {
5192 aprint_error_dev(sc->sc_dev,
5193 "EEPROM not present\n");
5194 return -1;
5195 }
5196 }
5197 }
5198
5199 return 0;
5200 }
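
/*
 * Illustrative sketch (not compiled): the bank signature sits in the high
 * byte of the word at ICH_NVM_SIG_WORD, and a valid bank reads back 0b10
 * in bits 7:6 of that byte, as tested above.
 */
#if 0
	uint8_t sig = 0x8a;			/* example signature byte */
	int valid = ((sig & 0xc0) == 0x80);	/* true: bank is valid */
#endif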
5201
5202 /******************************************************************************
5203 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5204 * register.
5205 *
5206 * sc - Struct containing variables accessed by shared code
5207 * offset - offset of word in the EEPROM to read
5208  * words - number of words to read
5209  * data - word(s) read from the EEPROM
5210 *****************************************************************************/
5211 static int
5212 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5213 {
5214 int32_t error = 0;
5215 uint32_t flash_bank = 0;
5216 uint32_t act_offset = 0;
5217 uint32_t bank_offset = 0;
5218 uint16_t word = 0;
5219 uint16_t i = 0;
5220
5221 /* We need to know which is the valid flash bank. In the event
5222 * that we didn't allocate eeprom_shadow_ram, we may not be
5223 * managing flash_bank. So it cannot be trusted and needs
5224 * to be updated with each read.
5225 */
5226 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5227 if (error) {
5228 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5229 __func__);
5230 return error;
5231 }
5232
5233 	/* Compute the byte offset of the active bank (bank size is in words). */
5234 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5235
5236 error = wm_get_swfwhw_semaphore(sc);
5237 if (error) {
5238 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5239 __func__);
5240 return error;
5241 }
5242
5243 for (i = 0; i < words; i++) {
5244 /* The NVM part needs a byte offset, hence * 2 */
5245 act_offset = bank_offset + ((offset + i) * 2);
5246 error = wm_read_ich8_word(sc, act_offset, &word);
5247 if (error) {
5248 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5249 __func__);
5250 break;
5251 }
5252 data[i] = word;
5253 }
5254
5255 wm_put_swfwhw_semaphore(sc);
5256 return error;
5257 }
5258
5259 /******************************************************************************
5260 * This function does initial flash setup so that a new read/write/erase cycle
5261 * can be started.
5262 *
5263 * sc - The pointer to the hw structure
5264 ****************************************************************************/
5265 static int32_t
5266 wm_ich8_cycle_init(struct wm_softc *sc)
5267 {
5268 uint16_t hsfsts;
5269 int32_t error = 1;
5270 int32_t i = 0;
5271
5272 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5273
5274 	/* Check the Flash Descriptor Valid bit in the HW status register */
5275 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5276 return error;
5277 }
5278
5279 	/* Clear FCERR and DAEL in the HW status register; both bits
5280 	 * are write-one-to-clear. */
5281 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5282
5283 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5284
5285 	/* Before starting a new cycle we should either have a hardware
5286 	 * cycle-in-progress bit to check, or the FDONE bit should read 1
5287 	 * after a hardware reset so that it can indicate whether a cycle
5288 	 * is in progress or has completed.  We should also have some
5289 	 * software semaphore mechanism guarding FDONE and the
5290 	 * in-progress bit, so that accesses by two threads are
5291 	 * serialized and they cannot start a cycle at the same
5292 	 * time. */
5293
5294 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5295 /* There is no cycle running at present, so we can start a cycle */
5296 /* Begin by setting Flash Cycle Done. */
5297 hsfsts |= HSFSTS_DONE;
5298 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5299 error = 0;
5300 } else {
5301 		/* Otherwise poll for some time, giving the current cycle
5302 		 * a chance to end before we give up. */
5303 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5304 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5305 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5306 error = 0;
5307 break;
5308 }
5309 delay(1);
5310 }
5311 if (error == 0) {
5312 			/* The previous cycle finished in time; now set
5313 			 * the Flash Cycle Done bit. */
5314 hsfsts |= HSFSTS_DONE;
5315 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5316 }
5317 }
5318 return error;
5319 }
5320
5321 /******************************************************************************
5322 * This function starts a flash cycle and waits for its completion
5323 *
5324 * sc - The pointer to the hw structure
5325 ****************************************************************************/
5326 static int32_t
5327 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5328 {
5329 uint16_t hsflctl;
5330 uint16_t hsfsts;
5331 int32_t error = 1;
5332 uint32_t i = 0;
5333
5334 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5335 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5336 hsflctl |= HSFCTL_GO;
5337 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5338
5339 /* wait till FDONE bit is set to 1 */
5340 do {
5341 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5342 if (hsfsts & HSFSTS_DONE)
5343 break;
5344 delay(1);
5345 i++;
5346 } while (i < timeout);
5347 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5348 error = 0;
5349 }
5350 return error;
5351 }
5352
5353 /******************************************************************************
5354 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5355 *
5356 * sc - The pointer to the hw structure
5357 * index - The index of the byte or word to read.
5358 * size - Size of data to read, 1=byte 2=word
5359 * data - Pointer to the word to store the value read.
5360 *****************************************************************************/
5361 static int32_t
5362 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5363 uint32_t size, uint16_t* data)
5364 {
5365 uint16_t hsfsts;
5366 uint16_t hsflctl;
5367 uint32_t flash_linear_address;
5368 uint32_t flash_data = 0;
5369 int32_t error = 1;
5370 int32_t count = 0;
5371
5372 	if (size < 1 || size > 2 || data == NULL ||
5373 index > ICH_FLASH_LINEAR_ADDR_MASK)
5374 return error;
5375
5376 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5377 sc->sc_ich8_flash_base;
5378
5379 do {
5380 delay(1);
5381 /* Steps */
5382 error = wm_ich8_cycle_init(sc);
5383 if (error)
5384 break;
5385
5386 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5387 		/* A byte count of 0/1 selects a 1- or 2-byte transfer. */
5388 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5389 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5390 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5391
5392 		/* Write the low 24 bits of the index into the Flash Linear
5393 		 * Address field of the Flash Address register */
5394 /* TODO: TBD maybe check the index against the size of flash */
5395
5396 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5397
5398 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5399
5400 		/* If FCERR is set, clear it and retry the whole sequence a
5401 		 * few more times; otherwise read the result out of Flash
5402 		 * Data0, least significant byte first */
5403 if (error == 0) {
5404 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5405 if (size == 1) {
5406 *data = (uint8_t)(flash_data & 0x000000FF);
5407 } else if (size == 2) {
5408 *data = (uint16_t)(flash_data & 0x0000FFFF);
5409 }
5410 break;
5411 } else {
5412 			/* If we've gotten here, things are probably hosed,
5413 			 * but if a flash error was detected it won't hurt to
5414 			 * retry, up to ICH_FLASH_CYCLE_REPEAT_COUNT times.
5415 			 */
5416 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5417 if (hsfsts & HSFSTS_ERR) {
5418 /* Repeat for some time before giving up. */
5419 continue;
5420 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5421 break;
5422 }
5423 }
5424 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5425
5426 return error;
5427 }
5428
5429 /******************************************************************************
5430 * Reads a single byte from the NVM using the ICH8 flash access registers.
5431 *
5432 * sc - pointer to wm_hw structure
5433 * index - The index of the byte to read.
5434 * data - Pointer to a byte to store the value read.
5435 *****************************************************************************/
5436 static int32_t
5437 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5438 {
5439 int32_t status;
5440 uint16_t word = 0;
5441
5442 status = wm_read_ich8_data(sc, index, 1, &word);
5443 if (status == 0) {
5444 *data = (uint8_t)word;
5445 }
5446
5447 return status;
5448 }
5449
5450 /******************************************************************************
5451 * Reads a word from the NVM using the ICH8 flash access registers.
5452 *
5453 * sc - pointer to wm_hw structure
5454 * index - The starting byte index of the word to read.
5455 * data - Pointer to a word to store the value read.
5456 *****************************************************************************/
5457 static int32_t
5458 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5459 {
5460 int32_t status;
5461
5462 status = wm_read_ich8_data(sc, index, 2, data);
5463 return status;
5464 }
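
/*
 * Illustrative sketch (not compiled): the flash routines take byte
 * indices while the EEPROM API deals in words, hence the "* 2" in
 * wm_read_eeprom_ich8() above.  Reading EEPROM word 0x10 (ignoring the
 * bank offset, and assuming the flash semaphore is already held as
 * wm_read_eeprom_ich8() arranges):
 */
#if 0
	uint16_t word;

	if (wm_read_ich8_word(sc, 0x10 * 2, &word) == 0) {
		/* word now holds EEPROM word 0x10 */
	}
#endif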
5465
5466 static int
5467 wm_check_mng_mode(struct wm_softc *sc)
5468 {
5469 int rv;
5470
5471 switch (sc->sc_type) {
5472 case WM_T_ICH8:
5473 case WM_T_ICH9:
5474 case WM_T_ICH10:
5475 rv = wm_check_mng_mode_ich8lan(sc);
5476 break;
5477 #if 0
5478 case WM_T_82574:
5479 /*
5480 		 * The function is provided in the em driver, but it's
5481 		 * not used.  Why?
5482 */
5483 rv = wm_check_mng_mode_82574(sc);
5484 break;
5485 #endif
5486 case WM_T_82571:
5487 case WM_T_82572:
5488 case WM_T_82573:
5489 case WM_T_80003:
5490 rv = wm_check_mng_mode_generic(sc);
5491 break;
5492 default:
5493 		/* nothing to do */
5494 rv = 0;
5495 break;
5496 }
5497
5498 return rv;
5499 }
5500
5501 static int
5502 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5503 {
5504 uint32_t fwsm;
5505
5506 fwsm = CSR_READ(sc, WMREG_FWSM);
5507
5508 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5509 return 1;
5510
5511 return 0;
5512 }
5513
5514 #if 0
5515 static int
5516 wm_check_mng_mode_82574(struct wm_softc *sc)
5517 {
5518 uint16_t data;
5519
5520 wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5521
5522 if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5523 return 1;
5524
5525 return 0;
5526 }
5527 #endif
5528
5529 static int
5530 wm_check_mng_mode_generic(struct wm_softc *sc)
5531 {
5532 uint32_t fwsm;
5533
5534 fwsm = CSR_READ(sc, WMREG_FWSM);
5535
5536 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5537 return 1;
5538
5539 return 0;
5540 }
5541
5542 static void
5543 wm_get_hw_control(struct wm_softc *sc)
5544 {
5545 uint32_t reg;
5546
5547 switch (sc->sc_type) {
5548 case WM_T_82573:
5549 #if 0
5550 case WM_T_82574:
5551 /*
5552 		 * FreeBSD's em driver has a function that checks the
5553 		 * management mode for the 82574, but it's not used.  Why?
5554 */
5555 #endif
5556 reg = CSR_READ(sc, WMREG_SWSM);
5557 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5558 break;
5559 case WM_T_82571:
5560 case WM_T_82572:
5561 case WM_T_80003:
5562 case WM_T_ICH8:
5563 case WM_T_ICH9:
5564 case WM_T_ICH10:
5565 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5566 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5567 break;
5568 default:
5569 break;
5570 }
5571 }
5572
5573 /* XXX Currently TBI only */
5574 static int
5575 wm_check_for_link(struct wm_softc *sc)
5576 {
5577 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5578 uint32_t rxcw;
5579 uint32_t ctrl;
5580 uint32_t status;
5581 uint32_t sig;
5582
5583 rxcw = CSR_READ(sc, WMREG_RXCW);
5584 ctrl = CSR_READ(sc, WMREG_CTRL);
5585 status = CSR_READ(sc, WMREG_STATUS);
5586
5587 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5588
5589 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5590 device_xname(sc->sc_dev), __func__,
5591 ((ctrl & CTRL_SWDPIN(1)) == sig),
5592 ((status & STATUS_LU) != 0),
5593 ((rxcw & RXCW_C) != 0)
5594 ));
5595
5596 /*
5597 * SWDPIN LU RXCW
5598 * 0 0 0
5599 * 0 0 1 (should not happen)
5600 * 0 1 0 (should not happen)
5601 * 0 1 1 (should not happen)
5602 	 *	1	0	0	Disable autonegotiation and force link up
5603 	 *	1	0	1	got /C/ but not linkup yet
5604 	 *	1	1	0	(linkup)
5605 	 *	1	1	1	If IFM_AUTO, back to autonegotiation
5606 *
5607 */
5608 if (((ctrl & CTRL_SWDPIN(1)) == sig)
5609 && ((status & STATUS_LU) == 0)
5610 && ((rxcw & RXCW_C) == 0)) {
5611 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5612 __func__));
5613 sc->sc_tbi_linkup = 0;
5614 /* Disable auto-negotiation in the TXCW register */
5615 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5616
5617 /*
5618 * Force link-up and also force full-duplex.
5619 *
5620 		 * NOTE: the hardware updates TFCE and RFCE inside CTRL
5621 		 * automatically, so keep sc->sc_ctrl in sync.
5622 */
5623 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5624 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5625 	} else if (((status & STATUS_LU) != 0)
5626 && ((rxcw & RXCW_C) != 0)
5627 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5628 sc->sc_tbi_linkup = 1;
5629 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5630 __func__));
5631 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5632 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5633 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5634 && ((rxcw & RXCW_C) != 0)) {
5635 DPRINTF(WM_DEBUG_LINK, ("/C/"));
5636 } else {
5637 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5638 status));
5639 }
5640
5641 return 0;
5642 }
5643