/*	$NetBSD: if_wm.c,v 1.173 2009/04/07 18:23:37 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.173 2009/04/07 18:23:37 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
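
/*
 * Usage sketch (illustrative only, not a line from this section): the
 * extra parentheses around the second argument let DPRINTF forward an
 * entire printf() argument list through a single macro parameter, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...", device_xname(sc->sc_dev)));
 */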

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET)	/* for TSO */
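
/*
 * Because the ring and job-queue sizes are powers of two, WM_NEXTTX()
 * and WM_NEXTTXS() wrap with a mask instead of a modulo.  A minimal
 * sketch of the arithmetic (illustrative only, not part of the driver):
 */
#if 0
/* With WM_NTXDESC(sc) == 4096: wm_nexttx_example(sc, 4095) == 0. */
static inline int
wm_nexttx_example(struct wm_softc *sc, int idx)
{

	return ((idx + 1) & (WM_NTXDESC(sc) - 1));	/* == WM_NEXTTX() */
}
#endif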

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_82574,			/* i82574 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
	WM_T_ICH10,			/* ICH10 LAN */
} wm_chip_type;

#define	WM_LINKUP_TIMEOUT	50

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
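
/*
 * sc_rxtailp always points at the m_next slot the next mbuf should
 * fill, so WM_RXCHAIN_LINK() appends in O(1) with no special case for
 * an empty chain.  A hedged sketch of the idiom (not driver code):
 */
#if 0
static void
wm_rxchain_example(struct wm_softc *sc, struct mbuf *m1, struct mbuf *m2)
{

	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 */
}
#endif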

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* software-firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* software-firmware-hardware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
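
/*
 * Typical usage (sketch only): PREREAD|PREWRITE hands descriptors to
 * the device after the CPU fills them in; POSTREAD|POSTWRITE hands
 * them back before the CPU inspects completions.  WM_CDTXSYNC() does
 * the sync in two pieces when the range wraps past the end of the ring.
 */
#if 0
	WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#endif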

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
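
/*
 * Why the 2-byte scoot works: the Ethernet header is ETHER_HDR_LEN
 * (14) bytes, so starting the frame at offset 2 puts the IP header at
 * offset 2 + 14 = 16, a 4-byte boundary.  (Worked example only; the
 * actual offset is chosen at run time via sc_align_tweak.)
 */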

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *sc, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
#if 0
static int	wm_check_mng_mode_82574(struct wm_softc *);
#endif
static int	wm_check_mng_mode_generic(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);

CFATTACH_DECL_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);


/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LM_3,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82567LF_3,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
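
/*
 * The two bus_space accesses above implement a register window: the
 * target register offset is written at byte 0 of the I/O BAR, and the
 * data is transferred at byte 4.  A hedged usage sketch (not a line
 * from this driver):
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);	/* indirect CTRL write */
#endif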

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Rx DMA map %d error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	switch (sc->sc_type) {
	case WM_T_82571:
	case WM_T_82572:
	case WM_T_82573:
	case WM_T_82574:
	case WM_T_80003:
	case WM_T_ICH8:
	case WM_T_ICH9:
	case WM_T_ICH10:
		if (wm_check_mng_mode(sc) != 0)
			wm_get_hw_control(sc);
		break;
	default:
		break;
	}

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
	    || (sc->sc_type == WM_T_ICH10)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
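		/*
		 * Worked example (assuming GFPREG holds the base sector
		 * number in its low bits and the limit in bits 16..28,
		 * with 4 KB sectors): GFPREG == 0x00200001 gives a flash
		 * base of 1 * 4096 = 0x1000 and a per-bank size of
		 * (0x20 + 1 - 1) * 4096 / 4 = 0x8000 16-bit words.
		 */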
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type == WM_T_82574)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc)) {
		/*
		 * Try again, because some PCI-e parts fail the first
		 * check due to the link being in sleep state.
		 */
		if (wm_validate_eeprom_checksum(sc))
			sc->sc_flags |= WM_F_EEPROM_INVALID;
	}

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
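	/*
	 * Example (illustrative only): a first port at 00:11:22:33:44:a0
	 * yields 00:11:22:33:44:a1 for the second port.
	 */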

	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error_dev(sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_82573
	    || sc->sc_type == WM_T_82574) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	xname = device_xname(sc->sc_dev);
	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
	    sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
1604 * TCP segmentation offload.
1605 */
1606 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1607 ifp->if_capabilities |= IFCAP_TSOv4;
1608 }
1609
1610 if (sc->sc_type >= WM_T_82571) {
1611 ifp->if_capabilities |= IFCAP_TSOv6;
1612 }
1613
1614 /*
1615 * Attach the interface.
1616 */
1617 if_attach(ifp);
1618 ether_ifattach(ifp, enaddr);
1619 #if NRND > 0
1620 rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
1621 #endif
1622
1623 #ifdef WM_EVENT_COUNTERS
1624 /* Attach event counters. */
1625 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1626 NULL, xname, "txsstall");
1627 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1628 NULL, xname, "txdstall");
1629 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1630 NULL, xname, "txfifo_stall");
1631 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1632 NULL, xname, "txdw");
1633 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1634 NULL, xname, "txqe");
1635 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1636 NULL, xname, "rxintr");
1637 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1638 NULL, xname, "linkintr");
1639
1640 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1641 NULL, xname, "rxipsum");
1642 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1643 NULL, xname, "rxtusum");
1644 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1645 NULL, xname, "txipsum");
1646 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1647 NULL, xname, "txtusum");
1648 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1649 NULL, xname, "txtusum6");
1650
1651 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1652 NULL, xname, "txtso");
1653 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1654 NULL, xname, "txtso6");
1655 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1656 NULL, xname, "txtsopain");
1657
1658 for (i = 0; i < WM_NTXSEGS; i++) {
1659 		snprintf(wm_txseg_evcnt_names[i],
		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
1660 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1661 NULL, xname, wm_txseg_evcnt_names[i]);
1662 }
1663
1664 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1665 NULL, xname, "txdrop");
1666
1667 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1668 NULL, xname, "tu");
1669
1670 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1671 NULL, xname, "tx_xoff");
1672 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1673 NULL, xname, "tx_xon");
1674 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1675 NULL, xname, "rx_xoff");
1676 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1677 NULL, xname, "rx_xon");
1678 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1679 NULL, xname, "rx_macctl");
1680 #endif /* WM_EVENT_COUNTERS */
1681
1682 if (!pmf_device_register(self, NULL, NULL))
1683 aprint_error_dev(self, "couldn't establish power handler\n");
1684 else
1685 pmf_class_network_register(self, ifp);
1686
1687 return;
1688
1689 /*
1690 * Free any resources we've allocated during the failed attach
1691 * attempt. Do this in reverse order and fall through.
1692 */
1693 fail_5:
1694 for (i = 0; i < WM_NRXDESC; i++) {
1695 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1696 bus_dmamap_destroy(sc->sc_dmat,
1697 sc->sc_rxsoft[i].rxs_dmamap);
1698 }
1699 fail_4:
1700 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1701 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1702 bus_dmamap_destroy(sc->sc_dmat,
1703 sc->sc_txsoft[i].txs_dmamap);
1704 }
1705 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1706 fail_3:
1707 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1708 fail_2:
1709 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1710 cdata_size);
1711 fail_1:
1712 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1713 fail_0:
1714 return;
1715 }
1716
1717 /*
1718 * wm_tx_offload:
1719 *
1720 * Set up TCP/IP checksumming parameters for the
1721 * specified packet.
1722 */
1723 static int
1724 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1725 uint8_t *fieldsp)
1726 {
1727 struct mbuf *m0 = txs->txs_mbuf;
1728 struct livengood_tcpip_ctxdesc *t;
1729 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1730 uint32_t ipcse;
1731 struct ether_header *eh;
1732 int offset, iphl;
1733 uint8_t fields;
1734
1735 /*
1736 * XXX It would be nice if the mbuf pkthdr had offset
1737 * fields for the protocol headers.
1738 */
1739
1740 eh = mtod(m0, struct ether_header *);
1741 switch (htons(eh->ether_type)) {
1742 case ETHERTYPE_IP:
1743 case ETHERTYPE_IPV6:
1744 offset = ETHER_HDR_LEN;
1745 break;
1746
1747 case ETHERTYPE_VLAN:
1748 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1749 break;
1750
1751 default:
1752 /*
1753 * Don't support this protocol or encapsulation.
1754 */
1755 *fieldsp = 0;
1756 *cmdp = 0;
1757 return (0);
1758 }
1759
1760 if ((m0->m_pkthdr.csum_flags &
1761 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1762 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1763 } else {
1764 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1765 }
1766 ipcse = offset + iphl - 1;
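	/*
	 * Worked example (illustrative): for a plain IPv4 frame with no
	 * IP options, offset = ETHER_HDR_LEN = 14 and iphl = 20, so
	 * ipcse = 14 + 20 - 1 = 33 -- the offset of the last byte of the
	 * IP header, i.e. the end of the range the IP checksum covers.
	 */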
1767
1768 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1769 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1770 seg = 0;
1771 fields = 0;
1772
1773 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1774 int hlen = offset + iphl;
1775 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1776
1777 if (__predict_false(m0->m_len <
1778 (hlen + sizeof(struct tcphdr)))) {
1779 /*
1780 * TCP/IP headers are not in the first mbuf; we need
1781 * to do this the slow and painful way. Let's just
1782 * hope this doesn't happen very often.
1783 */
1784 struct tcphdr th;
1785
1786 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1787
1788 m_copydata(m0, hlen, sizeof(th), &th);
1789 if (v4) {
1790 struct ip ip;
1791
1792 m_copydata(m0, offset, sizeof(ip), &ip);
1793 ip.ip_len = 0;
1794 m_copyback(m0,
1795 offset + offsetof(struct ip, ip_len),
1796 sizeof(ip.ip_len), &ip.ip_len);
1797 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1798 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1799 } else {
1800 struct ip6_hdr ip6;
1801
1802 m_copydata(m0, offset, sizeof(ip6), &ip6);
1803 ip6.ip6_plen = 0;
1804 m_copyback(m0,
1805 offset + offsetof(struct ip6_hdr, ip6_plen),
1806 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1807 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1808 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1809 }
1810 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1811 sizeof(th.th_sum), &th.th_sum);
1812
1813 hlen += th.th_off << 2;
1814 } else {
1815 /*
1816 * TCP/IP headers are in the first mbuf; we can do
1817 * this the easy way.
1818 */
1819 struct tcphdr *th;
1820
1821 if (v4) {
1822 struct ip *ip =
1823 (void *)(mtod(m0, char *) + offset);
1824 th = (void *)(mtod(m0, char *) + hlen);
1825
1826 ip->ip_len = 0;
1827 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1828 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1829 } else {
1830 struct ip6_hdr *ip6 =
1831 (void *)(mtod(m0, char *) + offset);
1832 th = (void *)(mtod(m0, char *) + hlen);
1833
1834 ip6->ip6_plen = 0;
1835 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1836 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1837 }
1838 hlen += th->th_off << 2;
1839 }
1840
1841 if (v4) {
1842 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1843 cmdlen |= WTX_TCPIP_CMD_IP;
1844 } else {
1845 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1846 ipcse = 0;
1847 }
1848 cmd |= WTX_TCPIP_CMD_TSE;
1849 cmdlen |= WTX_TCPIP_CMD_TSE |
1850 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1851 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1852 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1853 }
1854
1855 /*
1856 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1857 * offload feature, if we load the context descriptor, we
1858 * MUST provide valid values for IPCSS and TUCSS fields.
1859 */
1860
1861 ipcs = WTX_TCPIP_IPCSS(offset) |
1862 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1863 WTX_TCPIP_IPCSE(ipcse);
1864 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1865 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1866 fields |= WTX_IXSM;
1867 }
1868
1869 offset += iphl;
1870
1871 if (m0->m_pkthdr.csum_flags &
1872 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1873 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1874 fields |= WTX_TXSM;
1875 tucs = WTX_TCPIP_TUCSS(offset) |
1876 WTX_TCPIP_TUCSO(offset +
1877 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1878 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1879 } else if ((m0->m_pkthdr.csum_flags &
1880 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1881 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1882 fields |= WTX_TXSM;
1883 tucs = WTX_TCPIP_TUCSS(offset) |
1884 WTX_TCPIP_TUCSO(offset +
1885 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1886 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1887 } else {
1888 /* Just initialize it to a valid TCP context. */
1889 tucs = WTX_TCPIP_TUCSS(offset) |
1890 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1891 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1892 }
1893
1894 /* Fill in the context descriptor. */
1895 t = (struct livengood_tcpip_ctxdesc *)
1896 &sc->sc_txdescs[sc->sc_txnext];
1897 t->tcpip_ipcs = htole32(ipcs);
1898 t->tcpip_tucs = htole32(tucs);
1899 t->tcpip_cmdlen = htole32(cmdlen);
1900 t->tcpip_seg = htole32(seg);
1901 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1902
1903 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1904 txs->txs_ndesc++;
1905
1906 *cmdp = cmd;
1907 *fieldsp = fields;
1908
1909 return (0);
1910 }
1911
1912 static void
1913 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 struct mbuf *m;
1916 int i;
1917
1918 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
1919 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1920 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1921 "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
1922 m->m_data, m->m_len, m->m_flags);
1923 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
1924 i, i == 1 ? "" : "s");
1925 }
1926
1927 /*
1928 * wm_82547_txfifo_stall:
1929 *
1930 * Callout used to wait for the 82547 Tx FIFO to drain,
1931 * reset the FIFO pointers, and restart packet transmission.
1932 */
1933 static void
1934 wm_82547_txfifo_stall(void *arg)
1935 {
1936 struct wm_softc *sc = arg;
1937 int s;
1938
1939 s = splnet();
1940
1941 if (sc->sc_txfifo_stall) {
1942 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1943 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1944 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1945 /*
1946 * Packets have drained. Stop transmitter, reset
1947 * FIFO pointers, restart transmitter, and kick
1948 * the packet queue.
1949 */
1950 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1951 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1952 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1953 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1954 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1955 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1956 CSR_WRITE(sc, WMREG_TCTL, tctl);
1957 CSR_WRITE_FLUSH(sc);
1958
1959 sc->sc_txfifo_head = 0;
1960 sc->sc_txfifo_stall = 0;
1961 wm_start(&sc->sc_ethercom.ec_if);
1962 } else {
1963 /*
1964 * Still waiting for packets to drain; try again in
1965 * another tick.
1966 */
1967 callout_schedule(&sc->sc_txfifo_ch, 1);
1968 }
1969 }
1970
1971 splx(s);
1972 }
1973
1974 /*
1975 * wm_82547_txfifo_bugchk:
1976 *
1977 * Check for bug condition in the 82547 Tx FIFO. We need to
1978 	 * prevent enqueueing a packet that would wrap around the end
1979 	 * of the Tx FIFO ring buffer; otherwise the chip will croak.
1980 *
1981 * We do this by checking the amount of space before the end
1982 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1983 * the Tx FIFO, wait for all remaining packets to drain, reset
1984 * the internal FIFO pointers to the beginning, and restart
1985 * transmission on the interface.
1986 */
1987 #define WM_FIFO_HDR 0x10
1988 #define WM_82547_PAD_LEN 0x3e0
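/*
 * Illustrative numbers for the accounting below: a full-size 1514-byte
 * frame consumes roundup(1514 + WM_FIFO_HDR, WM_FIFO_HDR) = 1536 bytes
 * of FIFO space, so with that len the stall path is taken once the room
 * left before the end of the FIFO drops to len - WM_82547_PAD_LEN
 * (1536 - 992 = 544) bytes or less.
 */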
1989 static int
1990 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1991 {
1992 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1993 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1994
1995 /* Just return if already stalled. */
1996 if (sc->sc_txfifo_stall)
1997 return (1);
1998
1999 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2000 /* Stall only occurs in half-duplex mode. */
2001 goto send_packet;
2002 }
2003
2004 if (len >= WM_82547_PAD_LEN + space) {
2005 sc->sc_txfifo_stall = 1;
2006 callout_schedule(&sc->sc_txfifo_ch, 1);
2007 return (1);
2008 }
2009
2010 send_packet:
2011 sc->sc_txfifo_head += len;
2012 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2013 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2014
2015 return (0);
2016 }
2017
2018 /*
2019 * wm_start: [ifnet interface function]
2020 *
2021 * Start packet transmission on the interface.
2022 */
2023 static void
2024 wm_start(struct ifnet *ifp)
2025 {
2026 struct wm_softc *sc = ifp->if_softc;
2027 struct mbuf *m0;
2028 struct m_tag *mtag;
2029 struct wm_txsoft *txs;
2030 bus_dmamap_t dmamap;
2031 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2032 bus_addr_t curaddr;
2033 bus_size_t seglen, curlen;
2034 uint32_t cksumcmd;
2035 uint8_t cksumfields;
2036
2037 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2038 return;
2039
2040 /*
2041 * Remember the previous number of free descriptors.
2042 */
2043 ofree = sc->sc_txfree;
2044
2045 /*
2046 * Loop through the send queue, setting up transmit descriptors
2047 * until we drain the queue, or use up all available transmit
2048 * descriptors.
2049 */
2050 for (;;) {
2051 /* Grab a packet off the queue. */
2052 IFQ_POLL(&ifp->if_snd, m0);
2053 if (m0 == NULL)
2054 break;
2055
2056 DPRINTF(WM_DEBUG_TX,
2057 ("%s: TX: have packet to transmit: %p\n",
2058 device_xname(sc->sc_dev), m0));
2059
2060 /* Get a work queue entry. */
2061 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2062 wm_txintr(sc);
2063 if (sc->sc_txsfree == 0) {
2064 DPRINTF(WM_DEBUG_TX,
2065 ("%s: TX: no free job descriptors\n",
2066 device_xname(sc->sc_dev)));
2067 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2068 break;
2069 }
2070 }
2071
2072 txs = &sc->sc_txsoft[sc->sc_txsnext];
2073 dmamap = txs->txs_dmamap;
2074
2075 use_tso = (m0->m_pkthdr.csum_flags &
2076 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2077
2078 /*
2079 * So says the Linux driver:
2080 * The controller does a simple calculation to make sure
2081 * there is enough room in the FIFO before initiating the
2082 * DMA for each buffer. The calc is:
2083 * 4 = ceil(buffer len / MSS)
2084 * To make sure we don't overrun the FIFO, adjust the max
2085 * buffer len if the MSS drops.
2086 */
2087 dmamap->dm_maxsegsz =
2088 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2089 ? m0->m_pkthdr.segsz << 2
2090 : WTX_MAX_LEN;
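		/*
		 * E.g. (illustrative): with a typical MSS of 1460, each DMA
		 * segment is capped at min(1460 << 2, WTX_MAX_LEN), i.e. at
		 * most 5840 bytes, which keeps the controller's per-buffer
		 * FIFO estimate described above on the safe side.
		 */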
2091
2092 /*
2093 * Load the DMA map. If this fails, the packet either
2094 * didn't fit in the allotted number of segments, or we
2095 * were short on resources. For the too-many-segments
2096 * case, we simply report an error and drop the packet,
2097 * since we can't sanely copy a jumbo packet to a single
2098 * buffer.
2099 */
2100 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2101 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2102 if (error) {
2103 if (error == EFBIG) {
2104 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2105 log(LOG_ERR, "%s: Tx packet consumes too many "
2106 "DMA segments, dropping...\n",
2107 device_xname(sc->sc_dev));
2108 IFQ_DEQUEUE(&ifp->if_snd, m0);
2109 wm_dump_mbuf_chain(sc, m0);
2110 m_freem(m0);
2111 continue;
2112 }
2113 /*
2114 * Short on resources, just stop for now.
2115 */
2116 DPRINTF(WM_DEBUG_TX,
2117 ("%s: TX: dmamap load failed: %d\n",
2118 device_xname(sc->sc_dev), error));
2119 break;
2120 }
2121
2122 segs_needed = dmamap->dm_nsegs;
2123 if (use_tso) {
2124 /* For sentinel descriptor; see below. */
2125 segs_needed++;
2126 }
2127
2128 /*
2129 * Ensure we have enough descriptors free to describe
2130 * the packet. Note, we always reserve one descriptor
2131 * at the end of the ring due to the semantics of the
2132 * TDT register, plus one more in the event we need
2133 * to load offload context.
2134 */
2135 if (segs_needed > sc->sc_txfree - 2) {
2136 /*
2137 * Not enough free descriptors to transmit this
2138 * packet. We haven't committed anything yet,
2139 			 * so just unload the DMA map, leave the packet
2140 			 * on the queue, and punt.  Notify the upper
2141 * layer that there are no more slots left.
2142 */
2143 DPRINTF(WM_DEBUG_TX,
2144 ("%s: TX: need %d (%d) descriptors, have %d\n",
2145 device_xname(sc->sc_dev), dmamap->dm_nsegs,
2146 segs_needed, sc->sc_txfree - 1));
2147 ifp->if_flags |= IFF_OACTIVE;
2148 bus_dmamap_unload(sc->sc_dmat, dmamap);
2149 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2150 break;
2151 }
2152
2153 /*
2154 * Check for 82547 Tx FIFO bug. We need to do this
2155 * once we know we can transmit the packet, since we
2156 * do some internal FIFO space accounting here.
2157 */
2158 if (sc->sc_type == WM_T_82547 &&
2159 wm_82547_txfifo_bugchk(sc, m0)) {
2160 DPRINTF(WM_DEBUG_TX,
2161 ("%s: TX: 82547 Tx FIFO bug detected\n",
2162 device_xname(sc->sc_dev)));
2163 ifp->if_flags |= IFF_OACTIVE;
2164 bus_dmamap_unload(sc->sc_dmat, dmamap);
2165 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2166 break;
2167 }
2168
2169 IFQ_DEQUEUE(&ifp->if_snd, m0);
2170
2171 /*
2172 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2173 */
2174
2175 DPRINTF(WM_DEBUG_TX,
2176 ("%s: TX: packet has %d (%d) DMA segments\n",
2177 device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2178
2179 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2180
2181 /*
2182 * Store a pointer to the packet so that we can free it
2183 * later.
2184 *
2185 * Initially, we consider the number of descriptors the
2186 * packet uses the number of DMA segments. This may be
2187 * incremented by 1 if we do checksum offload (a descriptor
2188 * is used to set the checksum context).
2189 */
2190 txs->txs_mbuf = m0;
2191 txs->txs_firstdesc = sc->sc_txnext;
2192 txs->txs_ndesc = segs_needed;
2193
2194 /* Set up offload parameters for this packet. */
2195 if (m0->m_pkthdr.csum_flags &
2196 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2197 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2198 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2199 if (wm_tx_offload(sc, txs, &cksumcmd,
2200 &cksumfields) != 0) {
2201 /* Error message already displayed. */
2202 bus_dmamap_unload(sc->sc_dmat, dmamap);
2203 continue;
2204 }
2205 } else {
2206 cksumcmd = 0;
2207 cksumfields = 0;
2208 }
2209
2210 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2211
2212 /* Sync the DMA map. */
2213 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2214 BUS_DMASYNC_PREWRITE);
2215
2216 /*
2217 * Initialize the transmit descriptor.
2218 */
2219 for (nexttx = sc->sc_txnext, seg = 0;
2220 seg < dmamap->dm_nsegs; seg++) {
2221 for (seglen = dmamap->dm_segs[seg].ds_len,
2222 curaddr = dmamap->dm_segs[seg].ds_addr;
2223 seglen != 0;
2224 curaddr += curlen, seglen -= curlen,
2225 nexttx = WM_NEXTTX(sc, nexttx)) {
2226 curlen = seglen;
2227
2228 /*
2229 * So says the Linux driver:
2230 * Work around for premature descriptor
2231 * write-backs in TSO mode. Append a
2232 * 4-byte sentinel descriptor.
2233 */
2234 if (use_tso &&
2235 seg == dmamap->dm_nsegs - 1 &&
2236 curlen > 8)
2237 curlen -= 4;
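				/*
				 * E.g. a final 1000-byte segment is emitted
				 * as a 996-byte descriptor here; the 4 bytes
				 * left in seglen then become the sentinel
				 * descriptor on the next pass of this loop.
				 */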
2238
2239 wm_set_dma_addr(
2240 &sc->sc_txdescs[nexttx].wtx_addr,
2241 curaddr);
2242 sc->sc_txdescs[nexttx].wtx_cmdlen =
2243 htole32(cksumcmd | curlen);
2244 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2245 0;
2246 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2247 cksumfields;
2248 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2249 lasttx = nexttx;
2250
2251 DPRINTF(WM_DEBUG_TX,
2252 ("%s: TX: desc %d: low 0x%08lx, "
2253 "len 0x%04x\n",
2254 device_xname(sc->sc_dev), nexttx,
2255 curaddr & 0xffffffffUL, (unsigned)curlen));
2256 }
2257 }
2258
2259 KASSERT(lasttx != -1);
2260
2261 /*
2262 * Set up the command byte on the last descriptor of
2263 * the packet. If we're in the interrupt delay window,
2264 * delay the interrupt.
2265 */
2266 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2267 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2268
2269 /*
2270 * If VLANs are enabled and the packet has a VLAN tag, set
2271 * up the descriptor to encapsulate the packet for us.
2272 *
2273 * This is only valid on the last descriptor of the packet.
2274 */
2275 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2276 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2277 htole32(WTX_CMD_VLE);
2278 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2279 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2280 }
2281
2282 txs->txs_lastdesc = lasttx;
2283
2284 DPRINTF(WM_DEBUG_TX,
2285 ("%s: TX: desc %d: cmdlen 0x%08x\n",
2286 device_xname(sc->sc_dev),
2287 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2288
2289 /* Sync the descriptors we're using. */
2290 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2291 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2292
2293 /* Give the packet to the chip. */
2294 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2295
2296 DPRINTF(WM_DEBUG_TX,
2297 ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2298
2299 DPRINTF(WM_DEBUG_TX,
2300 ("%s: TX: finished transmitting packet, job %d\n",
2301 device_xname(sc->sc_dev), sc->sc_txsnext));
2302
2303 /* Advance the tx pointer. */
2304 sc->sc_txfree -= txs->txs_ndesc;
2305 sc->sc_txnext = nexttx;
2306
2307 sc->sc_txsfree--;
2308 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2309
2310 #if NBPFILTER > 0
2311 /* Pass the packet to any BPF listeners. */
2312 if (ifp->if_bpf)
2313 bpf_mtap(ifp->if_bpf, m0);
2314 #endif /* NBPFILTER > 0 */
2315 }
2316
2317 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2318 /* No more slots; notify upper layer. */
2319 ifp->if_flags |= IFF_OACTIVE;
2320 }
2321
2322 if (sc->sc_txfree != ofree) {
2323 /* Set a watchdog timer in case the chip flakes out. */
2324 ifp->if_timer = 5;
2325 }
2326 }
2327
2328 /*
2329 * wm_watchdog: [ifnet interface function]
2330 *
2331 * Watchdog timer handler.
2332 */
2333 static void
2334 wm_watchdog(struct ifnet *ifp)
2335 {
2336 struct wm_softc *sc = ifp->if_softc;
2337
2338 /*
2339 * Since we're using delayed interrupts, sweep up
2340 * before we report an error.
2341 */
2342 wm_txintr(sc);
2343
2344 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2345 log(LOG_ERR,
2346 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2347 device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2348 sc->sc_txnext);
2349 ifp->if_oerrors++;
2350
2351 /* Reset the interface. */
2352 (void) wm_init(ifp);
2353 }
2354
2355 /* Try to get more packets going. */
2356 wm_start(ifp);
2357 }
2358
2359 /*
2360 * wm_ioctl: [ifnet interface function]
2361 *
2362 * Handle control requests from the operator.
2363 */
2364 static int
2365 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2366 {
2367 struct wm_softc *sc = ifp->if_softc;
2368 struct ifreq *ifr = (struct ifreq *) data;
2369 int s, error;
2370
2371 s = splnet();
2372
2373 switch (cmd) {
2374 case SIOCSIFMEDIA:
2375 case SIOCGIFMEDIA:
2376 /* Flow control requires full-duplex mode. */
2377 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2378 (ifr->ifr_media & IFM_FDX) == 0)
2379 ifr->ifr_media &= ~IFM_ETH_FMASK;
2380 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2381 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2382 /* We can do both TXPAUSE and RXPAUSE. */
2383 ifr->ifr_media |=
2384 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2385 }
2386 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2387 }
2388 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2389 break;
2390 default:
2391 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2392 break;
2393
2394 error = 0;
2395
2396 if (cmd == SIOCSIFCAP)
2397 error = (*ifp->if_init)(ifp);
2398 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2399 ;
2400 else if (ifp->if_flags & IFF_RUNNING) {
2401 /*
2402 * Multicast list has changed; set the hardware filter
2403 * accordingly.
2404 */
2405 wm_set_filter(sc);
2406 }
2407 break;
2408 }
2409
2410 /* Try to get more packets going. */
2411 wm_start(ifp);
2412
2413 splx(s);
2414 return (error);
2415 }
2416
2417 /*
2418 * wm_intr:
2419 *
2420 * Interrupt service routine.
2421 */
2422 static int
2423 wm_intr(void *arg)
2424 {
2425 struct wm_softc *sc = arg;
2426 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2427 uint32_t icr;
2428 int handled = 0;
2429
2430 while (1 /* CONSTCOND */) {
2431 icr = CSR_READ(sc, WMREG_ICR);
2432 if ((icr & sc->sc_icr) == 0)
2433 break;
2434 #if 0 /*NRND > 0*/
2435 if (RND_ENABLED(&sc->rnd_source))
2436 rnd_add_uint32(&sc->rnd_source, icr);
2437 #endif
2438
2439 handled = 1;
2440
2441 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2442 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2443 DPRINTF(WM_DEBUG_RX,
2444 ("%s: RX: got Rx intr 0x%08x\n",
2445 device_xname(sc->sc_dev),
2446 icr & (ICR_RXDMT0|ICR_RXT0)));
2447 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2448 }
2449 #endif
2450 wm_rxintr(sc);
2451
2452 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2453 if (icr & ICR_TXDW) {
2454 DPRINTF(WM_DEBUG_TX,
2455 ("%s: TX: got TXDW interrupt\n",
2456 device_xname(sc->sc_dev)));
2457 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2458 }
2459 #endif
2460 wm_txintr(sc);
2461
2462 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2463 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2464 wm_linkintr(sc, icr);
2465 }
2466
2467 if (icr & ICR_RXO) {
2468 ifp->if_ierrors++;
2469 #if defined(WM_DEBUG)
2470 log(LOG_WARNING, "%s: Receive overrun\n",
2471 device_xname(sc->sc_dev));
2472 #endif /* defined(WM_DEBUG) */
2473 }
2474 }
2475
2476 if (handled) {
2477 /* Try to get more packets going. */
2478 wm_start(ifp);
2479 }
2480
2481 return (handled);
2482 }
2483
2484 /*
2485 * wm_txintr:
2486 *
2487 * Helper; handle transmit interrupts.
2488 */
2489 static void
2490 wm_txintr(struct wm_softc *sc)
2491 {
2492 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2493 struct wm_txsoft *txs;
2494 uint8_t status;
2495 int i;
2496
2497 ifp->if_flags &= ~IFF_OACTIVE;
2498
2499 /*
2500 * Go through the Tx list and free mbufs for those
2501 * frames which have been transmitted.
2502 */
2503 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2504 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2505 txs = &sc->sc_txsoft[i];
2506
2507 DPRINTF(WM_DEBUG_TX,
2508 ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
2509
2510 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2511 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2512
2513 status =
2514 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2515 if ((status & WTX_ST_DD) == 0) {
2516 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2517 BUS_DMASYNC_PREREAD);
2518 break;
2519 }
2520
2521 DPRINTF(WM_DEBUG_TX,
2522 ("%s: TX: job %d done: descs %d..%d\n",
2523 device_xname(sc->sc_dev), i, txs->txs_firstdesc,
2524 txs->txs_lastdesc));
2525
2526 /*
2527 * XXX We should probably be using the statistics
2528 * XXX registers, but I don't know if they exist
2529 * XXX on chips before the i82544.
2530 */
2531
2532 #ifdef WM_EVENT_COUNTERS
2533 if (status & WTX_ST_TU)
2534 WM_EVCNT_INCR(&sc->sc_ev_tu);
2535 #endif /* WM_EVENT_COUNTERS */
2536
2537 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2538 ifp->if_oerrors++;
2539 if (status & WTX_ST_LC)
2540 log(LOG_WARNING, "%s: late collision\n",
2541 device_xname(sc->sc_dev));
2542 else if (status & WTX_ST_EC) {
2543 ifp->if_collisions += 16;
2544 log(LOG_WARNING, "%s: excessive collisions\n",
2545 device_xname(sc->sc_dev));
2546 }
2547 } else
2548 ifp->if_opackets++;
2549
2550 sc->sc_txfree += txs->txs_ndesc;
2551 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2552 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2553 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2554 m_freem(txs->txs_mbuf);
2555 txs->txs_mbuf = NULL;
2556 }
2557
2558 /* Update the dirty transmit buffer pointer. */
2559 sc->sc_txsdirty = i;
2560 DPRINTF(WM_DEBUG_TX,
2561 ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
2562
2563 /*
2564 * If there are no more pending transmissions, cancel the watchdog
2565 * timer.
2566 */
2567 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2568 ifp->if_timer = 0;
2569 }
2570
2571 /*
2572 * wm_rxintr:
2573 *
2574 * Helper; handle receive interrupts.
2575 */
2576 static void
2577 wm_rxintr(struct wm_softc *sc)
2578 {
2579 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2580 struct wm_rxsoft *rxs;
2581 struct mbuf *m;
2582 int i, len;
2583 uint8_t status, errors;
2584 uint16_t vlantag;
2585
2586 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2587 rxs = &sc->sc_rxsoft[i];
2588
2589 DPRINTF(WM_DEBUG_RX,
2590 ("%s: RX: checking descriptor %d\n",
2591 device_xname(sc->sc_dev), i));
2592
2593 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2594
2595 status = sc->sc_rxdescs[i].wrx_status;
2596 errors = sc->sc_rxdescs[i].wrx_errors;
2597 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2598 vlantag = sc->sc_rxdescs[i].wrx_special;
2599
2600 if ((status & WRX_ST_DD) == 0) {
2601 /*
2602 * We have processed all of the receive descriptors.
2603 */
2604 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2605 break;
2606 }
2607
2608 if (__predict_false(sc->sc_rxdiscard)) {
2609 DPRINTF(WM_DEBUG_RX,
2610 ("%s: RX: discarding contents of descriptor %d\n",
2611 device_xname(sc->sc_dev), i));
2612 WM_INIT_RXDESC(sc, i);
2613 if (status & WRX_ST_EOP) {
2614 /* Reset our state. */
2615 DPRINTF(WM_DEBUG_RX,
2616 ("%s: RX: resetting rxdiscard -> 0\n",
2617 device_xname(sc->sc_dev)));
2618 sc->sc_rxdiscard = 0;
2619 }
2620 continue;
2621 }
2622
2623 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2624 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2625
2626 m = rxs->rxs_mbuf;
2627
2628 /*
2629 * Add a new receive buffer to the ring, unless of
2630 * course the length is zero. Treat the latter as a
2631 * failed mapping.
2632 */
2633 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2634 /*
2635 * Failed, throw away what we've done so
2636 * far, and discard the rest of the packet.
2637 */
2638 ifp->if_ierrors++;
2639 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2640 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2641 WM_INIT_RXDESC(sc, i);
2642 if ((status & WRX_ST_EOP) == 0)
2643 sc->sc_rxdiscard = 1;
2644 if (sc->sc_rxhead != NULL)
2645 m_freem(sc->sc_rxhead);
2646 WM_RXCHAIN_RESET(sc);
2647 DPRINTF(WM_DEBUG_RX,
2648 ("%s: RX: Rx buffer allocation failed, "
2649 "dropping packet%s\n", device_xname(sc->sc_dev),
2650 sc->sc_rxdiscard ? " (discard)" : ""));
2651 continue;
2652 }
2653
2654 m->m_len = len;
2655 sc->sc_rxlen += len;
2656 DPRINTF(WM_DEBUG_RX,
2657 ("%s: RX: buffer at %p len %d\n",
2658 device_xname(sc->sc_dev), m->m_data, len));
2659
2660 /*
2661 * If this is not the end of the packet, keep
2662 * looking.
2663 */
2664 if ((status & WRX_ST_EOP) == 0) {
2665 WM_RXCHAIN_LINK(sc, m);
2666 DPRINTF(WM_DEBUG_RX,
2667 ("%s: RX: not yet EOP, rxlen -> %d\n",
2668 device_xname(sc->sc_dev), sc->sc_rxlen));
2669 continue;
2670 }
2671
2672 /*
2673 * Okay, we have the entire packet now. The chip is
2674 * configured to include the FCS (not all chips can
2675 * be configured to strip it), so we need to trim it.
2676 * May need to adjust length of previous mbuf in the
2677 * chain if the current mbuf is too short.
2678 */
2679 if (m->m_len < ETHER_CRC_LEN) {
2680 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2681 m->m_len = 0;
2682 } else {
2683 m->m_len -= ETHER_CRC_LEN;
2684 }
2685 len = sc->sc_rxlen - ETHER_CRC_LEN;
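		/*
		 * Example: if the final mbuf held only 2 of the 4 FCS bytes,
		 * the code above trims the other 2 from the previous mbuf and
		 * zeroes this one, so the chain's total length again matches
		 * sc_rxlen - ETHER_CRC_LEN.
		 */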
2686
2687 WM_RXCHAIN_LINK(sc, m);
2688
2689 *sc->sc_rxtailp = NULL;
2690 m = sc->sc_rxhead;
2691
2692 WM_RXCHAIN_RESET(sc);
2693
2694 DPRINTF(WM_DEBUG_RX,
2695 ("%s: RX: have entire packet, len -> %d\n",
2696 device_xname(sc->sc_dev), len));
2697
2698 /*
2699 * If an error occurred, update stats and drop the packet.
2700 */
2701 if (errors &
2702 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2703 ifp->if_ierrors++;
2704 if (errors & WRX_ER_SE)
2705 log(LOG_WARNING, "%s: symbol error\n",
2706 device_xname(sc->sc_dev));
2707 else if (errors & WRX_ER_SEQ)
2708 log(LOG_WARNING, "%s: receive sequence error\n",
2709 device_xname(sc->sc_dev));
2710 else if (errors & WRX_ER_CE)
2711 log(LOG_WARNING, "%s: CRC error\n",
2712 device_xname(sc->sc_dev));
2713 m_freem(m);
2714 continue;
2715 }
2716
2717 /*
2718 * No errors. Receive the packet.
2719 */
2720 m->m_pkthdr.rcvif = ifp;
2721 m->m_pkthdr.len = len;
2722
2723 /*
2724 * If VLANs are enabled, VLAN packets have been unwrapped
2725 * for us. Associate the tag with the packet.
2726 */
2727 if ((status & WRX_ST_VP) != 0) {
2728 VLAN_INPUT_TAG(ifp, m,
2729 le16toh(vlantag),
2730 continue);
2731 }
2732
2733 /*
2734 * Set up checksum info for this packet.
2735 */
2736 if ((status & WRX_ST_IXSM) == 0) {
2737 if (status & WRX_ST_IPCS) {
2738 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2739 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2740 if (errors & WRX_ER_IPE)
2741 m->m_pkthdr.csum_flags |=
2742 M_CSUM_IPv4_BAD;
2743 }
2744 if (status & WRX_ST_TCPCS) {
2745 /*
2746 * Note: we don't know if this was TCP or UDP,
2747 * so we just set both bits, and expect the
2748 * upper layers to deal.
2749 */
2750 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2751 m->m_pkthdr.csum_flags |=
2752 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2753 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2754 if (errors & WRX_ER_TCPE)
2755 m->m_pkthdr.csum_flags |=
2756 M_CSUM_TCP_UDP_BAD;
2757 }
2758 }
2759
2760 ifp->if_ipackets++;
2761
2762 #if NBPFILTER > 0
2763 /* Pass this up to any BPF listeners. */
2764 if (ifp->if_bpf)
2765 bpf_mtap(ifp->if_bpf, m);
2766 #endif /* NBPFILTER > 0 */
2767
2768 /* Pass it on. */
2769 (*ifp->if_input)(ifp, m);
2770 }
2771
2772 /* Update the receive pointer. */
2773 sc->sc_rxptr = i;
2774
2775 DPRINTF(WM_DEBUG_RX,
2776 ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
2777 }
2778
2779 /*
2780 * wm_linkintr:
2781 *
2782 * Helper; handle link interrupts.
2783 */
2784 static void
2785 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2786 {
2787 uint32_t status;
2788
2789 DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
2790 __func__));
2791 /*
2792 * If we get a link status interrupt on a 1000BASE-T
2793 * device, just fall into the normal MII tick path.
2794 */
2795 if (sc->sc_flags & WM_F_HAS_MII) {
2796 if (icr & ICR_LSC) {
2797 DPRINTF(WM_DEBUG_LINK,
2798 ("%s: LINK: LSC -> mii_tick\n",
2799 device_xname(sc->sc_dev)));
2800 mii_tick(&sc->sc_mii);
2801 if (sc->sc_type == WM_T_82543) {
2802 int miistatus, active;
2803
2804 /*
2805 * With 82543, we need to force speed and
2806 * duplex on the MAC equal to what the PHY
2807 * speed and duplex configuration is.
2808 */
2809 miistatus = sc->sc_mii.mii_media_status;
2810
2811 if (miistatus & IFM_ACTIVE) {
2812 active = sc->sc_mii.mii_media_active;
2813 sc->sc_ctrl &= ~(CTRL_SPEED_MASK
2814 | CTRL_FD);
2815 switch (IFM_SUBTYPE(active)) {
2816 case IFM_10_T:
2817 sc->sc_ctrl |= CTRL_SPEED_10;
2818 break;
2819 case IFM_100_TX:
2820 sc->sc_ctrl |= CTRL_SPEED_100;
2821 break;
2822 case IFM_1000_T:
2823 sc->sc_ctrl |= CTRL_SPEED_1000;
2824 break;
2825 default:
2826 /*
2827 					 * Fiber?
2828 					 * Should not enter here.
2829 */
2830 printf("unknown media (%x)\n",
2831 active);
2832 break;
2833 }
2834 if (active & IFM_FDX)
2835 sc->sc_ctrl |= CTRL_FD;
2836 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2837 }
2838 }
2839 } else if (icr & ICR_RXSEQ) {
2840 DPRINTF(WM_DEBUG_LINK,
2841 ("%s: LINK Receive sequence error\n",
2842 device_xname(sc->sc_dev)));
2843 }
2844 return;
2845 }
2846
2847 status = CSR_READ(sc, WMREG_STATUS);
2848 if (icr & ICR_LSC) {
2849 if (status & STATUS_LU) {
2850 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2851 device_xname(sc->sc_dev),
2852 (status & STATUS_FD) ? "FDX" : "HDX"));
2853 /*
2854 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
2855 			 * automatically, so re-read it into sc->sc_ctrl here.
2856 */
2857
2858 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
2859 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2860 sc->sc_fcrtl &= ~FCRTL_XONE;
2861 if (status & STATUS_FD)
2862 sc->sc_tctl |=
2863 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2864 else
2865 sc->sc_tctl |=
2866 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2867 if (sc->sc_ctrl & CTRL_TFCE)
2868 sc->sc_fcrtl |= FCRTL_XONE;
2869 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2870 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2871 WMREG_OLD_FCRTL : WMREG_FCRTL,
2872 sc->sc_fcrtl);
2873 sc->sc_tbi_linkup = 1;
2874 } else {
2875 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2876 device_xname(sc->sc_dev)));
2877 sc->sc_tbi_linkup = 0;
2878 }
2879 wm_tbi_set_linkled(sc);
2880 } else if (icr & ICR_RXCFG) {
2881 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2882 device_xname(sc->sc_dev)));
2883 sc->sc_tbi_nrxcfg++;
2884 wm_check_for_link(sc);
2885 } else if (icr & ICR_RXSEQ) {
2886 DPRINTF(WM_DEBUG_LINK,
2887 ("%s: LINK: Receive sequence error\n",
2888 device_xname(sc->sc_dev)));
2889 }
2890 }
2891
2892 /*
2893 * wm_tick:
2894 *
2895 * One second timer, used to check link status, sweep up
2896 * completed transmit jobs, etc.
2897 */
2898 static void
2899 wm_tick(void *arg)
2900 {
2901 struct wm_softc *sc = arg;
2902 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2903 int s;
2904
2905 s = splnet();
2906
2907 if (sc->sc_type >= WM_T_82542_2_1) {
2908 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2909 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2910 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2911 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2912 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2913 }
2914
2915 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2916 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2917 
2919 if (sc->sc_flags & WM_F_HAS_MII)
2920 mii_tick(&sc->sc_mii);
2921 else
2922 wm_tbi_check_link(sc);
2923
2924 splx(s);
2925
2926 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2927 }
2928
2929 /*
2930 * wm_reset:
2931 *
2932 * Reset the i82542 chip.
2933 */
2934 static void
2935 wm_reset(struct wm_softc *sc)
2936 {
2937 uint32_t reg;
2938
2939 /*
2940 * Allocate on-chip memory according to the MTU size.
2941 * The Packet Buffer Allocation register must be written
2942 * before the chip is reset.
2943 */
2944 switch (sc->sc_type) {
2945 case WM_T_82547:
2946 case WM_T_82547_2:
2947 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2948 PBA_22K : PBA_30K;
2949 sc->sc_txfifo_head = 0;
2950 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2951 sc->sc_txfifo_size =
2952 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2953 sc->sc_txfifo_stall = 0;
2954 break;
2955 case WM_T_82571:
2956 case WM_T_82572:
2957 case WM_T_80003:
2958 sc->sc_pba = PBA_32K;
2959 break;
2960 case WM_T_82573:
2961 case WM_T_82574:
2962 sc->sc_pba = PBA_12K;
2963 break;
2964 case WM_T_ICH8:
2965 sc->sc_pba = PBA_8K;
2966 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2967 break;
2968 case WM_T_ICH9:
2969 case WM_T_ICH10:
2970 sc->sc_pba = PBA_10K;
2971 break;
2972 default:
2973 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2974 PBA_40K : PBA_48K;
2975 break;
2976 }
2977 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
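	/*
	 * E.g. on an 82547 at standard MTU, the split above gives receive
	 * 30KB of the 40KB packet buffer and leaves a 10KB Tx FIFO starting
	 * at sc_txfifo_addr -- the size wm_82547_txfifo_bugchk() accounts
	 * against.
	 */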
2978
2979 if (sc->sc_flags & WM_F_PCIE) {
2980 int timeout = 800;
2981
2982 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2983 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2984
2985 		while (timeout--) {
2986 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2987 break;
2988 delay(100);
2989 }
2990 }
2991
2992 /* clear interrupt */
2993 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2994
2995 /*
2996 * 82541 Errata 29? & 82547 Errata 28?
2997 * See also the description about PHY_RST bit in CTRL register
2998 * in 8254x_GBe_SDM.pdf.
2999 */
3000 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3001 CSR_WRITE(sc, WMREG_CTRL,
3002 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3003 delay(5000);
3004 }
3005
3006 switch (sc->sc_type) {
3007 case WM_T_82544:
3008 case WM_T_82540:
3009 case WM_T_82545:
3010 case WM_T_82546:
3011 case WM_T_82541:
3012 case WM_T_82541_2:
3013 /*
3014 * On some chipsets, a reset through a memory-mapped write
3015 * cycle can cause the chip to reset before completing the
3016 	 * write cycle. This causes a major headache that can be
3017 * avoided by issuing the reset via indirect register writes
3018 * through I/O space.
3019 *
3020 * So, if we successfully mapped the I/O BAR at attach time,
3021 * use that. Otherwise, try our luck with a memory-mapped
3022 * reset.
3023 */
3024 if (sc->sc_flags & WM_F_IOH_VALID)
3025 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3026 else
3027 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3028 break;
3029
3030 case WM_T_82545_3:
3031 case WM_T_82546_3:
3032 /* Use the shadow control register on these chips. */
3033 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3034 break;
3035
3036 case WM_T_ICH8:
3037 case WM_T_ICH9:
3038 case WM_T_ICH10:
3039 wm_get_swfwhw_semaphore(sc);
3040 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
3041 		delay(10000);
		break;	/* don't fall through and issue a second, plain reset */
3042 
3043 default:
3044 /* Everything else can safely use the documented method. */
3045 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3046 break;
3047 }
3048 delay(10000);
3049
3050 /* reload EEPROM */
3051 	switch (sc->sc_type) {
3052 case WM_T_82542_2_0:
3053 case WM_T_82542_2_1:
3054 case WM_T_82543:
3055 case WM_T_82544:
3056 delay(10);
3057 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3058 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3059 delay(2000);
3060 break;
3061 case WM_T_82541:
3062 case WM_T_82541_2:
3063 case WM_T_82547:
3064 case WM_T_82547_2:
3065 delay(20000);
3066 break;
3067 case WM_T_82573:
3068 case WM_T_82574:
3069 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3070 delay(10);
3071 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3072 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3073 }
3074 /* FALLTHROUGH */
3075 default:
3076 /* check EECD_EE_AUTORD */
3077 wm_get_auto_rd_done(sc);
3078 }
3079
3080 #if 0
3081 for (i = 0; i < 1000; i++) {
3082 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3083 return;
3084 }
3085 delay(20);
3086 }
3087
3088 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3089 log(LOG_ERR, "%s: reset failed to complete\n",
3090 device_xname(sc->sc_dev));
3091 #endif
3092 }
3093
3094 /*
3095 * wm_init: [ifnet interface function]
3096 *
3097 * Initialize the interface. Must be called at splnet().
3098 */
3099 static int
3100 wm_init(struct ifnet *ifp)
3101 {
3102 struct wm_softc *sc = ifp->if_softc;
3103 struct wm_rxsoft *rxs;
3104 int i, error = 0;
3105 uint32_t reg;
3106
3107 /*
3108 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3109 	 * There is a small but measurable benefit to avoiding the adjustment
3110 * of the descriptor so that the headers are aligned, for normal mtu,
3111 * on such platforms. One possibility is that the DMA itself is
3112 * slightly more efficient if the front of the entire packet (instead
3113 * of the front of the headers) is aligned.
3114 *
3115 * Note we must always set align_tweak to 0 if we are using
3116 * jumbo frames.
3117 */
3118 #ifdef __NO_STRICT_ALIGNMENT
3119 sc->sc_align_tweak = 0;
3120 #else
3121 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3122 sc->sc_align_tweak = 0;
3123 else
3124 sc->sc_align_tweak = 2;
3125 #endif /* __NO_STRICT_ALIGNMENT */
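	/*
	 * E.g. with align_tweak = 2, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows is 32-bit aligned.
	 * The tweak must be 0 whenever mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
	 * would no longer leave 2 spare bytes in an MCLBYTES cluster.
	 */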
3126
3127 /* Cancel any pending I/O. */
3128 wm_stop(ifp, 0);
3129
3130 /* update statistics before reset */
3131 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3132 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3133
3134 /* Reset the chip to a known state. */
3135 wm_reset(sc);
3136
3137 switch (sc->sc_type) {
3138 case WM_T_82571:
3139 case WM_T_82572:
3140 case WM_T_82573:
3141 case WM_T_82574:
3142 case WM_T_80003:
3143 case WM_T_ICH8:
3144 case WM_T_ICH9:
3145 case WM_T_ICH10:
3146 if (wm_check_mng_mode(sc) != 0)
3147 wm_get_hw_control(sc);
3148 break;
3149 default:
3150 break;
3151 }
3152
3153 /* Initialize the transmit descriptor ring. */
3154 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3155 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3156 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3157 sc->sc_txfree = WM_NTXDESC(sc);
3158 sc->sc_txnext = 0;
3159
3160 if (sc->sc_type < WM_T_82543) {
3161 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3162 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3163 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3164 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3165 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3166 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3167 } else {
3168 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3169 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3170 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3171 CSR_WRITE(sc, WMREG_TDH, 0);
3172 CSR_WRITE(sc, WMREG_TDT, 0);
3173 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3174 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3175
3176 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3177 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3178 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3179 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3180 }
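	/*
	 * Sanity check on the units (illustrative): TIDV/TADV = 375 in
	 * 1024ns ticks is 384us -- the same interval as ITR = 1500 in
	 * 256ns ticks, set in the throttling code below.
	 */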
3181 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3182 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3183
3184 /* Initialize the transmit job descriptors. */
3185 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3186 sc->sc_txsoft[i].txs_mbuf = NULL;
3187 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3188 sc->sc_txsnext = 0;
3189 sc->sc_txsdirty = 0;
3190
3191 /*
3192 * Initialize the receive descriptor and receive job
3193 * descriptor rings.
3194 */
3195 if (sc->sc_type < WM_T_82543) {
3196 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3197 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3198 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3199 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3200 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3201 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3202
3203 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3204 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3205 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3206 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3207 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3208 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3209 } else {
3210 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3211 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3212 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3213 CSR_WRITE(sc, WMREG_RDH, 0);
3214 CSR_WRITE(sc, WMREG_RDT, 0);
3215 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3216 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3217 }
3218 for (i = 0; i < WM_NRXDESC; i++) {
3219 rxs = &sc->sc_rxsoft[i];
3220 if (rxs->rxs_mbuf == NULL) {
3221 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3222 log(LOG_ERR, "%s: unable to allocate or map rx "
3223 "buffer %d, error = %d\n",
3224 device_xname(sc->sc_dev), i, error);
3225 /*
3226 * XXX Should attempt to run with fewer receive
3227 * XXX buffers instead of just failing.
3228 */
3229 wm_rxdrain(sc);
3230 goto out;
3231 }
3232 } else
3233 WM_INIT_RXDESC(sc, i);
3234 }
3235 sc->sc_rxptr = 0;
3236 sc->sc_rxdiscard = 0;
3237 WM_RXCHAIN_RESET(sc);
3238
3239 /*
3240 * Clear out the VLAN table -- we don't use it (yet).
3241 */
3242 CSR_WRITE(sc, WMREG_VET, 0);
3243 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3244 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3245
3246 /*
3247 * Set up flow-control parameters.
3248 *
3249 * XXX Values could probably stand some tuning.
3250 */
3251 if (sc->sc_type != WM_T_ICH8) {
3252 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3253 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3254 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3255 }
3256
3257 sc->sc_fcrtl = FCRTL_DFLT;
3258 if (sc->sc_type < WM_T_82543) {
3259 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3260 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3261 } else {
3262 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3263 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3264 }
3265 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3266
3267 /* Deal with VLAN enables. */
3268 if (VLAN_ATTACHED(&sc->sc_ethercom))
3269 sc->sc_ctrl |= CTRL_VME;
3270 else
3271 sc->sc_ctrl &= ~CTRL_VME;
3272
3273 /* Write the control registers. */
3274 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3275 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3276 int val;
3277 val = CSR_READ(sc, WMREG_CTRL_EXT);
3278 val &= ~CTRL_EXT_LINK_MODE_MASK;
3279 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3280
3281 /* Bypass RX and TX FIFO's */
3282 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3283 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3284 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3285
3286 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3287 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3288 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3289 /*
3290 * Set the mac to wait the maximum time between each
3291 * iteration and increase the max iterations when
3292 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3293 */
3294 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3295 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3296 val |= 0x3F;
3297 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3298 }
3299 #if 0
3300 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3301 #endif
3302
3303 /*
3304 * Set up checksum offload parameters.
3305 */
3306 reg = CSR_READ(sc, WMREG_RXCSUM);
3307 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3308 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3309 reg |= RXCSUM_IPOFL;
3310 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3311 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3312 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3313 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3314 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3315
3316 /* Reset TBI's RXCFG count */
3317 sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
3318
3319 /*
3320 * Set up the interrupt registers.
3321 */
3322 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3323 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3324 ICR_RXO | ICR_RXT0;
3325 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3326 sc->sc_icr |= ICR_RXCFG;
3327 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3328
3329 /* Set up the inter-packet gap. */
3330 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3331
3332 if (sc->sc_type >= WM_T_82543) {
3333 /*
3334 * Set up the interrupt throttling register (units of 256ns)
3335 * Note that a footnote in Intel's documentation says this
3336 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3337 		 * or 10Mbit mode.  Empirically, the same appears to hold
3338 		 * for the 1024ns units of the other
3339 * interrupt-related timer registers -- so, really, we ought
3340 * to divide this value by 4 when the link speed is low.
3341 *
3342 * XXX implement this division at link speed change!
3343 */
3344
3345 /*
3346 * For N interrupts/sec, set this value to:
3347 * 1000000000 / (N * 256). Note that we set the
3348 * absolute and packet timer values to this value
3349 * divided by 4 to get "simple timer" behavior.
3350 */
3351
3352 sc->sc_itr = 1500; /* 2604 ints/sec */
3353 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
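		/*
		 * Checking the arithmetic: 1000000000 / (2604 * 256) ~= 1500,
		 * i.e. sc_itr = 1500 throttles to roughly 2604 interrupts/sec
		 * as the comment above says.
		 */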
3354 }
3355
3356 /* Set the VLAN ethernetype. */
3357 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3358
3359 /*
3360 * Set up the transmit control register; we start out with
3361 	 * a collision distance suitable for FDX, but update it when
3362 * we resolve the media type.
3363 */
3364 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3365 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3366 if (sc->sc_type >= WM_T_82571)
3367 sc->sc_tctl |= TCTL_MULR;
3368 if (sc->sc_type >= WM_T_80003)
3369 sc->sc_tctl |= TCTL_RTLC;
3370 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3371
3372 /* Set the media. */
3373 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3374 goto out;
3375
3376 /*
3377 * Set up the receive control register; we actually program
3378 * the register when we set the receive filter. Use multicast
3379 * address offset type 0.
3380 *
3381 * Only the i82544 has the ability to strip the incoming
3382 * CRC, so we don't enable that feature.
3383 */
3384 sc->sc_mchash_type = 0;
3385 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3386 | RCTL_MO(sc->sc_mchash_type);
3387
3388 	/* The 82573/82574 and ICH8 don't support jumbo frames */
3389 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_82574 &&
3390 sc->sc_type != WM_T_ICH8)
3391 sc->sc_rctl |= RCTL_LPE;
3392
3393 if (MCLBYTES == 2048) {
3394 sc->sc_rctl |= RCTL_2k;
3395 } else {
3396 if (sc->sc_type >= WM_T_82543) {
3397 			switch (MCLBYTES) {
3398 case 4096:
3399 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3400 break;
3401 case 8192:
3402 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3403 break;
3404 case 16384:
3405 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3406 break;
3407 default:
3408 panic("wm_init: MCLBYTES %d unsupported",
3409 MCLBYTES);
3410 break;
3411 }
3412 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
3413 }
3414
3415 /* Set the receive filter. */
3416 wm_set_filter(sc);
3417
3418 /* Start the one second link check clock. */
3419 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3420
3421 /* ...all done! */
3422 ifp->if_flags |= IFF_RUNNING;
3423 ifp->if_flags &= ~IFF_OACTIVE;
3424
3425 out:
3426 if (error)
3427 log(LOG_ERR, "%s: interface not running\n",
3428 device_xname(sc->sc_dev));
3429 return (error);
3430 }
3431
3432 /*
3433 * wm_rxdrain:
3434 *
3435 * Drain the receive queue.
3436 */
3437 static void
3438 wm_rxdrain(struct wm_softc *sc)
3439 {
3440 struct wm_rxsoft *rxs;
3441 int i;
3442
3443 for (i = 0; i < WM_NRXDESC; i++) {
3444 rxs = &sc->sc_rxsoft[i];
3445 if (rxs->rxs_mbuf != NULL) {
3446 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3447 m_freem(rxs->rxs_mbuf);
3448 rxs->rxs_mbuf = NULL;
3449 }
3450 }
3451 }
3452
3453 /*
3454 * wm_stop: [ifnet interface function]
3455 *
3456 * Stop transmission on the interface.
3457 */
3458 static void
3459 wm_stop(struct ifnet *ifp, int disable)
3460 {
3461 struct wm_softc *sc = ifp->if_softc;
3462 struct wm_txsoft *txs;
3463 int i;
3464
3465 /* Stop the one second clock. */
3466 callout_stop(&sc->sc_tick_ch);
3467
3468 /* Stop the 82547 Tx FIFO stall check timer. */
3469 if (sc->sc_type == WM_T_82547)
3470 callout_stop(&sc->sc_txfifo_ch);
3471
3472 if (sc->sc_flags & WM_F_HAS_MII) {
3473 /* Down the MII. */
3474 mii_down(&sc->sc_mii);
3475 } else {
3476 #if 0
3477 /* Should we clear PHY's status properly? */
3478 wm_reset(sc);
3479 #endif
3480 }
3481
3482 /* Stop the transmit and receive processes. */
3483 CSR_WRITE(sc, WMREG_TCTL, 0);
3484 CSR_WRITE(sc, WMREG_RCTL, 0);
3485
3486 /*
3487 * Clear the interrupt mask to ensure the device cannot assert its
3488 * interrupt line.
3489 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3490 * any currently pending or shared interrupt.
3491 */
3492 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3493 sc->sc_icr = 0;
3494
3495 /* Release any queued transmit buffers. */
3496 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3497 txs = &sc->sc_txsoft[i];
3498 if (txs->txs_mbuf != NULL) {
3499 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3500 m_freem(txs->txs_mbuf);
3501 txs->txs_mbuf = NULL;
3502 }
3503 }
3504
3505 /* Mark the interface as down and cancel the watchdog timer. */
3506 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3507 ifp->if_timer = 0;
3508
3509 if (disable)
3510 wm_rxdrain(sc);
3511 }
3512
3513 void
3514 wm_get_auto_rd_done(struct wm_softc *sc)
3515 {
3516 int i;
3517
3518 /* wait for eeprom to reload */
3519 switch (sc->sc_type) {
3520 case WM_T_82571:
3521 case WM_T_82572:
3522 case WM_T_82573:
3523 case WM_T_82574:
3524 case WM_T_80003:
3525 case WM_T_ICH8:
3526 case WM_T_ICH9:
3527 case WM_T_ICH10:
3528 for (i = 10; i > 0; i--) {
3529 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3530 break;
3531 delay(1000);
3532 }
3533 if (i == 0) {
3534 log(LOG_ERR, "%s: auto read from eeprom failed to "
3535 "complete\n", device_xname(sc->sc_dev));
3536 }
3537 break;
3538 default:
3539 delay(5000);
3540 break;
3541 }
3542
3543 /* Phy configuration starts after EECD_AUTO_RD is set */
3544 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574)
3545 delay(25000);
3546 }
3547
3548 /*
3549 * wm_acquire_eeprom:
3550 *
3551 * Perform the EEPROM handshake required on some chips.
3552 */
3553 static int
3554 wm_acquire_eeprom(struct wm_softc *sc)
3555 {
3556 uint32_t reg;
3557 int x;
3558 int ret = 0;
3559
3560 	/* Flash-based EEPROMs need no handshake; always succeeds. */
3561 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3562 return 0;
3563
3564 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3565 ret = wm_get_swfwhw_semaphore(sc);
3566 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3567 /* this will also do wm_get_swsm_semaphore() if needed */
3568 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3569 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3570 ret = wm_get_swsm_semaphore(sc);
3571 }
3572
3573 if (ret) {
3574 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
3575 __func__);
3576 return 1;
3577 }
3578
3579 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3580 reg = CSR_READ(sc, WMREG_EECD);
3581
3582 /* Request EEPROM access. */
3583 reg |= EECD_EE_REQ;
3584 CSR_WRITE(sc, WMREG_EECD, reg);
3585
3586 /* ..and wait for it to be granted. */
3587 for (x = 0; x < 1000; x++) {
3588 reg = CSR_READ(sc, WMREG_EECD);
3589 if (reg & EECD_EE_GNT)
3590 break;
3591 delay(5);
3592 }
3593 if ((reg & EECD_EE_GNT) == 0) {
3594 aprint_error_dev(sc->sc_dev,
3595 "could not acquire EEPROM GNT\n");
3596 reg &= ~EECD_EE_REQ;
3597 CSR_WRITE(sc, WMREG_EECD, reg);
3598 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3599 wm_put_swfwhw_semaphore(sc);
3600 if (sc->sc_flags & WM_F_SWFW_SYNC)
3601 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3602 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3603 wm_put_swsm_semaphore(sc);
3604 return (1);
3605 }
3606 }
3607
3608 return (0);
3609 }
3610
3611 /*
3612 * wm_release_eeprom:
3613 *
3614 * Release the EEPROM mutex.
3615 */
3616 static void
3617 wm_release_eeprom(struct wm_softc *sc)
3618 {
3619 uint32_t reg;
3620
3621 	/* Flash-based EEPROMs need no handshake; nothing to release. */
3622 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3623 return;
3624
3625 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3626 reg = CSR_READ(sc, WMREG_EECD);
3627 reg &= ~EECD_EE_REQ;
3628 CSR_WRITE(sc, WMREG_EECD, reg);
3629 }
3630
3631 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3632 wm_put_swfwhw_semaphore(sc);
3633 if (sc->sc_flags & WM_F_SWFW_SYNC)
3634 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3635 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3636 wm_put_swsm_semaphore(sc);
3637 }
3638
3639 /*
3640 * wm_eeprom_sendbits:
3641 *
3642 * Send a series of bits to the EEPROM.
3643 */
3644 static void
3645 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3646 {
3647 uint32_t reg;
3648 int x;
3649
3650 reg = CSR_READ(sc, WMREG_EECD);
3651
3652 for (x = nbits; x > 0; x--) {
3653 if (bits & (1U << (x - 1)))
3654 reg |= EECD_DI;
3655 else
3656 reg &= ~EECD_DI;
3657 CSR_WRITE(sc, WMREG_EECD, reg);
3658 delay(2);
3659 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3660 delay(2);
3661 CSR_WRITE(sc, WMREG_EECD, reg);
3662 delay(2);
3663 }
3664 }
3665
3666 /*
3667 * wm_eeprom_recvbits:
3668 *
3669 * Receive a series of bits from the EEPROM.
3670 */
3671 static void
3672 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3673 {
3674 uint32_t reg, val;
3675 int x;
3676
3677 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3678
3679 val = 0;
3680 for (x = nbits; x > 0; x--) {
3681 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3682 delay(2);
3683 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3684 val |= (1U << (x - 1));
3685 CSR_WRITE(sc, WMREG_EECD, reg);
3686 delay(2);
3687 }
3688 *valp = val;
3689 }
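
/*
 * Note on the two helpers above (illustrative, derived from the code, not
 * from a datasheet): they bit-bang the EEPROM's serial interface through
 * EECD, MSB first.  wm_eeprom_sendbits() drives DI before each rising edge
 * of SK; wm_eeprom_recvbits() samples DO while SK is high:
 *
 *	SK:  __/~~~\___/~~~\__
 *	DI:  <bit n-1 ><bit n-2>	(driven before SK rises)
 *	DO:      ^        ^		(sampled while SK is high)
 */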
3690
3691 /*
3692 * wm_read_eeprom_uwire:
3693 *
3694 * Read a word from the EEPROM using the MicroWire protocol.
3695 */
3696 static int
3697 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3698 {
3699 uint32_t reg, val;
3700 int i;
3701
3702 for (i = 0; i < wordcnt; i++) {
3703 /* Clear SK and DI. */
3704 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3705 CSR_WRITE(sc, WMREG_EECD, reg);
3706
3707 /* Set CHIP SELECT. */
3708 reg |= EECD_CS;
3709 CSR_WRITE(sc, WMREG_EECD, reg);
3710 delay(2);
3711
3712 /* Shift in the READ command. */
3713 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3714
3715 /* Shift in address. */
3716 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3717
3718 /* Shift out the data. */
3719 wm_eeprom_recvbits(sc, &val, 16);
3720 data[i] = val & 0xffff;
3721
3722 /* Clear CHIP SELECT. */
3723 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3724 CSR_WRITE(sc, WMREG_EECD, reg);
3725 delay(2);
3726 }
3727
3728 return (0);
3729 }
3730
3731 /*
3732 * wm_spi_eeprom_ready:
3733 *
3734 * Wait for a SPI EEPROM to be ready for commands.
3735 */
3736 static int
3737 wm_spi_eeprom_ready(struct wm_softc *sc)
3738 {
3739 uint32_t val;
3740 int usec;
3741
3742 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3743 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3744 wm_eeprom_recvbits(sc, &val, 8);
3745 if ((val & SPI_SR_RDY) == 0)
3746 break;
3747 }
3748 if (usec >= SPI_MAX_RETRIES) {
3749 aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
3750 return (1);
3751 }
3752 return (0);
3753 }
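
/*
 * Note on the poll above: SPI_OPC_RDSR is the standard SPI "read status
 * register" opcode, and the loop treats SPI_SR_RDY as a busy/write-in-
 * progress flag, i.e. the part is considered ready once that bit reads 0.
 */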
3754
3755 /*
3756 * wm_read_eeprom_spi:
3757 *
3758  *	Read a word from the EEPROM using the SPI protocol.
3759 */
3760 static int
3761 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3762 {
3763 uint32_t reg, val;
3764 int i;
3765 uint8_t opc;
3766
3767 /* Clear SK and CS. */
3768 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3769 CSR_WRITE(sc, WMREG_EECD, reg);
3770 delay(2);
3771
3772 if (wm_spi_eeprom_ready(sc))
3773 return (1);
3774
3775 /* Toggle CS to flush commands. */
3776 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3777 delay(2);
3778 CSR_WRITE(sc, WMREG_EECD, reg);
3779 delay(2);
3780
3781 opc = SPI_OPC_READ;
3782 if (sc->sc_ee_addrbits == 8 && word >= 128)
3783 opc |= SPI_OPC_A8;
3784
3785 wm_eeprom_sendbits(sc, opc, 8);
3786 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3787
3788 for (i = 0; i < wordcnt; i++) {
3789 wm_eeprom_recvbits(sc, &val, 16);
3790 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3791 }
3792
3793 /* Raise CS and clear SK. */
3794 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3795 CSR_WRITE(sc, WMREG_EECD, reg);
3796 delay(2);
3797
3798 return (0);
3799 }
3800
3801 #define EEPROM_CHECKSUM 0xBABA
3802 #define EEPROM_SIZE 0x0040
3803
3804 /*
3805 * wm_validate_eeprom_checksum
3806 *
3807  * The checksum is the sum of the first 64 16-bit words and must equal EEPROM_CHECKSUM (0xBABA).
3808 */
3809 static int
3810 wm_validate_eeprom_checksum(struct wm_softc *sc)
3811 {
3812 uint16_t checksum;
3813 uint16_t eeprom_data;
3814 int i;
3815
3816 checksum = 0;
3817
3818 for (i = 0; i < EEPROM_SIZE; i++) {
3819 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3820 return 1;
3821 checksum += eeprom_data;
3822 }
3823
3824 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3825 return 1;
3826
3827 return 0;
3828 }
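
/*
 * For reference, a minimal sketch (not compiled in) of how a programming
 * tool makes the test above pass: the last word of the checksummed region
 * is chosen so that the 64-word sum comes out to EEPROM_CHECKSUM.
 */
#if 0
static uint16_t
wm_compute_eeprom_checksum_word(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	/* Sum the first 63 words; the 64th is the checksum word itself. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];

	return (uint16_t)(EEPROM_CHECKSUM - sum);
}
#endif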
3829
3830 /*
3831 * wm_read_eeprom:
3832 *
3833 * Read data from the serial EEPROM.
3834 */
3835 static int
3836 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3837 {
3838 int rv;
3839
3840 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3841 return 1;
3842
3843 if (wm_acquire_eeprom(sc))
3844 return 1;
3845
3846 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3847 || (sc->sc_type == WM_T_ICH10))
3848 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3849 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3850 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3851 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3852 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3853 else
3854 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3855
3856 wm_release_eeprom(sc);
3857 return rv;
3858 }
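
/*
 * Usage sketch (not compiled in): a typical caller reads the station
 * address as three 16-bit words through the dispatcher above.
 * EEPROM_OFF_MACADDR here is assumed to be the MAC-address word offset
 * from if_wmreg.h.
 */
#if 0
static void
wm_read_macaddr_example(struct wm_softc *sc)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];

	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea) != 0)
		aprint_error_dev(sc->sc_dev,
		    "unable to read Ethernet address\n");
}
#endif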
3859
3860 static int
3861 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3862 uint16_t *data)
3863 {
3864 int i, eerd = 0;
3865 int error = 0;
3866
3867 for (i = 0; i < wordcnt; i++) {
3868 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3869
3870 CSR_WRITE(sc, WMREG_EERD, eerd);
3871 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3872 if (error != 0)
3873 break;
3874
3875 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3876 }
3877
3878 return error;
3879 }
3880
3881 static int
3882 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3883 {
3884 uint32_t attempts = 100000;
3885 uint32_t i, reg = 0;
3886 int32_t done = -1;
3887
3888 for (i = 0; i < attempts; i++) {
3889 reg = CSR_READ(sc, rw);
3890
3891 if (reg & EERD_DONE) {
3892 done = 0;
3893 break;
3894 }
3895 delay(5);
3896 }
3897
3898 return done;
3899 }
3900
3901 /*
3902 * wm_add_rxbuf:
3903 *
3904  *	Add a receive buffer to the indicated descriptor.
3905 */
3906 static int
3907 wm_add_rxbuf(struct wm_softc *sc, int idx)
3908 {
3909 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3910 struct mbuf *m;
3911 int error;
3912
3913 MGETHDR(m, M_DONTWAIT, MT_DATA);
3914 if (m == NULL)
3915 return (ENOBUFS);
3916
3917 MCLGET(m, M_DONTWAIT);
3918 if ((m->m_flags & M_EXT) == 0) {
3919 m_freem(m);
3920 return (ENOBUFS);
3921 }
3922
3923 if (rxs->rxs_mbuf != NULL)
3924 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3925
3926 rxs->rxs_mbuf = m;
3927
3928 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3929 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3930 BUS_DMA_READ|BUS_DMA_NOWAIT);
3931 if (error) {
3932 /* XXX XXX XXX */
3933 aprint_error_dev(sc->sc_dev,
3934 "unable to load rx DMA map %d, error = %d\n",
3935 idx, error);
3936 panic("wm_add_rxbuf");
3937 }
3938
3939 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3940 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3941
3942 WM_INIT_RXDESC(sc, idx);
3943
3944 return (0);
3945 }
3946
3947 /*
3948 * wm_set_ral:
3949 *
3950  *	Set an entry in the receive address list.
3951 */
3952 static void
3953 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3954 {
3955 uint32_t ral_lo, ral_hi;
3956
3957 if (enaddr != NULL) {
3958 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3959 (enaddr[3] << 24);
3960 ral_hi = enaddr[4] | (enaddr[5] << 8);
3961 ral_hi |= RAL_AV;
3962 } else {
3963 ral_lo = 0;
3964 ral_hi = 0;
3965 }
3966
3967 if (sc->sc_type >= WM_T_82544) {
3968 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3969 ral_lo);
3970 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3971 ral_hi);
3972 } else {
3973 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3974 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3975 }
3976 }
3977
3978 /*
3979 * wm_mchash:
3980 *
3981  *	Compute the hash of the multicast address for the 4096-bit
3982  *	multicast filter (1024-bit on the ICH8/9/10 variants).
3983 */
3984 static uint32_t
3985 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3986 {
3987 static const int lo_shift[4] = { 4, 3, 2, 0 };
3988 static const int hi_shift[4] = { 4, 5, 6, 8 };
3989 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3990 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3991 uint32_t hash;
3992
3993 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3994 || (sc->sc_type == WM_T_ICH10)) {
3995 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3996 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3997 return (hash & 0x3ff);
3998 }
3999 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
4000 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
4001
4002 return (hash & 0xfff);
4003 }
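
/*
 * Worked example for filter type 0 on non-ICH parts: for the all-hosts
 * group 01:00:5e:00:00:01, enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * wm_set_filter() below then uses hash >> 5 = 0 as the MTA register
 * index and hash & 0x1f = 16 as the bit number, i.e. bit 16 of MTA[0].
 */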
4004
4005 /*
4006 * wm_set_filter:
4007 *
4008 * Set up the receive filter.
4009 */
4010 static void
4011 wm_set_filter(struct wm_softc *sc)
4012 {
4013 struct ethercom *ec = &sc->sc_ethercom;
4014 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4015 struct ether_multi *enm;
4016 struct ether_multistep step;
4017 bus_addr_t mta_reg;
4018 uint32_t hash, reg, bit;
4019 int i, size;
4020
4021 if (sc->sc_type >= WM_T_82544)
4022 mta_reg = WMREG_CORDOVA_MTA;
4023 else
4024 mta_reg = WMREG_MTA;
4025
4026 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
4027
4028 if (ifp->if_flags & IFF_BROADCAST)
4029 sc->sc_rctl |= RCTL_BAM;
4030 if (ifp->if_flags & IFF_PROMISC) {
4031 sc->sc_rctl |= RCTL_UPE;
4032 goto allmulti;
4033 }
4034
4035 /*
4036 * Set the station address in the first RAL slot, and
4037 * clear the remaining slots.
4038 */
4039 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4040 || (sc->sc_type == WM_T_ICH10))
4041 size = WM_ICH8_RAL_TABSIZE;
4042 else
4043 size = WM_RAL_TABSIZE;
4044 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
4045 for (i = 1; i < size; i++)
4046 wm_set_ral(sc, NULL, i);
4047
4048 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4049 || (sc->sc_type == WM_T_ICH10))
4050 size = WM_ICH8_MC_TABSIZE;
4051 else
4052 size = WM_MC_TABSIZE;
4053 /* Clear out the multicast table. */
4054 for (i = 0; i < size; i++)
4055 CSR_WRITE(sc, mta_reg + (i << 2), 0);
4056
4057 ETHER_FIRST_MULTI(step, ec, enm);
4058 while (enm != NULL) {
4059 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
4060 /*
4061 * We must listen to a range of multicast addresses.
4062 * For now, just accept all multicasts, rather than
4063 * trying to set only those filter bits needed to match
4064 * the range. (At this time, the only use of address
4065 * ranges is for IP multicast routing, for which the
4066 * range is big enough to require all bits set.)
4067 */
4068 goto allmulti;
4069 }
4070
4071 hash = wm_mchash(sc, enm->enm_addrlo);
4072
4073 reg = (hash >> 5);
4074 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4075 || (sc->sc_type == WM_T_ICH10))
4076 reg &= 0x1f;
4077 else
4078 reg &= 0x7f;
4079 bit = hash & 0x1f;
4080
4081 hash = CSR_READ(sc, mta_reg + (reg << 2));
4082 hash |= 1U << bit;
4083
4084 /* XXX Hardware bug?? */
4085 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
4086 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
4087 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4088 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
4089 } else
4090 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
4091
4092 ETHER_NEXT_MULTI(step, enm);
4093 }
4094
4095 ifp->if_flags &= ~IFF_ALLMULTI;
4096 goto setit;
4097
4098 allmulti:
4099 ifp->if_flags |= IFF_ALLMULTI;
4100 sc->sc_rctl |= RCTL_MPE;
4101
4102 setit:
4103 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
4104 }
4105
4106 /*
4107 * wm_tbi_mediainit:
4108 *
4109 * Initialize media for use on 1000BASE-X devices.
4110 */
4111 static void
4112 wm_tbi_mediainit(struct wm_softc *sc)
4113 {
4114 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4115 const char *sep = "";
4116
4117 if (sc->sc_type < WM_T_82543)
4118 sc->sc_tipg = TIPG_WM_DFLT;
4119 else
4120 sc->sc_tipg = TIPG_LG_DFLT;
4121
4122 sc->sc_tbi_anegticks = 5;
4123
4124 /* Initialize our media structures */
4125 sc->sc_mii.mii_ifp = ifp;
4126
4127 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4128 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4129 wm_tbi_mediastatus);
4130
4131 /*
4132 * SWD Pins:
4133 *
4134 * 0 = Link LED (output)
4135 * 1 = Loss Of Signal (input)
4136 */
4137 sc->sc_ctrl |= CTRL_SWDPIO(0);
4138 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4139
4140 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4141
4142 #define ADD(ss, mm, dd) \
4143 do { \
4144 aprint_normal("%s%s", sep, ss); \
4145 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4146 sep = ", "; \
4147 } while (/*CONSTCOND*/0)
4148
4149 aprint_normal_dev(sc->sc_dev, "");
4150 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4151 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4152 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4153 aprint_normal("\n");
4154
4155 #undef ADD
4156
4157 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4158 }
4159
4160 /*
4161 * wm_tbi_mediastatus: [ifmedia interface function]
4162 *
4163 * Get the current interface media status on a 1000BASE-X device.
4164 */
4165 static void
4166 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4167 {
4168 struct wm_softc *sc = ifp->if_softc;
4169 uint32_t ctrl, status;
4170
4171 ifmr->ifm_status = IFM_AVALID;
4172 ifmr->ifm_active = IFM_ETHER;
4173
4174 status = CSR_READ(sc, WMREG_STATUS);
4175 if ((status & STATUS_LU) == 0) {
4176 ifmr->ifm_active |= IFM_NONE;
4177 return;
4178 }
4179
4180 ifmr->ifm_status |= IFM_ACTIVE;
4181 ifmr->ifm_active |= IFM_1000_SX;
4182 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4183 ifmr->ifm_active |= IFM_FDX;
4184 ctrl = CSR_READ(sc, WMREG_CTRL);
4185 if (ctrl & CTRL_RFCE)
4186 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4187 if (ctrl & CTRL_TFCE)
4188 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4189 }
4190
4191 /*
4192 * wm_tbi_mediachange: [ifmedia interface function]
4193 *
4194 * Set hardware to newly-selected media on a 1000BASE-X device.
4195 */
4196 static int
4197 wm_tbi_mediachange(struct ifnet *ifp)
4198 {
4199 struct wm_softc *sc = ifp->if_softc;
4200 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4201 uint32_t status;
4202 int i;
4203
4204 sc->sc_txcw = 0;
4205 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4206 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4207 sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
4208 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4209 sc->sc_txcw |= TXCW_ANE;
4210 } else {
4211 /*
4212 * If autonegotiation is turned off, force link up and turn on
4213 * full duplex
4214 */
4215 sc->sc_txcw &= ~TXCW_ANE;
4216 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4217 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4218 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4219 delay(1000);
4220 }
4221
4222 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4223 device_xname(sc->sc_dev),sc->sc_txcw));
4224 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4225 delay(10000);
4226
4227 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4228 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
4229
4230 /*
4231 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be
4232 	 * set if the optics detect a signal, and clear if they don't.
4233 */
4234 if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
4235 /* Have signal; wait for the link to come up. */
4236
4237 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4238 /*
4239 * Reset the link, and let autonegotiation do its thing
4240 */
4241 sc->sc_ctrl |= CTRL_LRST;
4242 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4243 delay(1000);
4244 sc->sc_ctrl &= ~CTRL_LRST;
4245 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4246 delay(1000);
4247 }
4248
4249 for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
4250 delay(10000);
4251 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4252 break;
4253 }
4254
4255 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4256 device_xname(sc->sc_dev),i));
4257
4258 status = CSR_READ(sc, WMREG_STATUS);
4259 DPRINTF(WM_DEBUG_LINK,
4260 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4261 device_xname(sc->sc_dev),status, STATUS_LU));
4262 if (status & STATUS_LU) {
4263 /* Link is up. */
4264 DPRINTF(WM_DEBUG_LINK,
4265 ("%s: LINK: set media -> link up %s\n",
4266 device_xname(sc->sc_dev),
4267 (status & STATUS_FD) ? "FDX" : "HDX"));
4268
4269 /*
4270 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
4271 			 * automatically, so keep sc->sc_ctrl in sync.
4272 */
4273 sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4274 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4275 sc->sc_fcrtl &= ~FCRTL_XONE;
4276 if (status & STATUS_FD)
4277 sc->sc_tctl |=
4278 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4279 else
4280 sc->sc_tctl |=
4281 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4282 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4283 sc->sc_fcrtl |= FCRTL_XONE;
4284 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4285 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4286 WMREG_OLD_FCRTL : WMREG_FCRTL,
4287 sc->sc_fcrtl);
4288 sc->sc_tbi_linkup = 1;
4289 } else {
4290 if (i == WM_LINKUP_TIMEOUT)
4291 wm_check_for_link(sc);
4292 /* Link is down. */
4293 DPRINTF(WM_DEBUG_LINK,
4294 ("%s: LINK: set media -> link down\n",
4295 device_xname(sc->sc_dev)));
4296 sc->sc_tbi_linkup = 0;
4297 }
4298 } else {
4299 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4300 device_xname(sc->sc_dev)));
4301 sc->sc_tbi_linkup = 0;
4302 }
4303
4304 wm_tbi_set_linkled(sc);
4305
4306 return (0);
4307 }
4308
4309 /*
4310 * wm_tbi_set_linkled:
4311 *
4312 * Update the link LED on 1000BASE-X devices.
4313 */
4314 static void
4315 wm_tbi_set_linkled(struct wm_softc *sc)
4316 {
4317
4318 if (sc->sc_tbi_linkup)
4319 sc->sc_ctrl |= CTRL_SWDPIN(0);
4320 else
4321 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4322
4323 /* 82540 or newer devices are active low */
4324 sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
4325
4326 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4327 }
4328
4329 /*
4330 * wm_tbi_check_link:
4331 *
4332 * Check the link on 1000BASE-X devices.
4333 */
4334 static void
4335 wm_tbi_check_link(struct wm_softc *sc)
4336 {
4337 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4338 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4339 uint32_t rxcw, ctrl, status;
4340
4341 status = CSR_READ(sc, WMREG_STATUS);
4342
4343 rxcw = CSR_READ(sc, WMREG_RXCW);
4344 ctrl = CSR_READ(sc, WMREG_CTRL);
4345
4346 /* set link status */
4347 if ((status & STATUS_LU) == 0) {
4348 DPRINTF(WM_DEBUG_LINK,
4349 ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
4350 sc->sc_tbi_linkup = 0;
4351 } else if (sc->sc_tbi_linkup == 0) {
4352 DPRINTF(WM_DEBUG_LINK,
4353 ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
4354 (status & STATUS_FD) ? "FDX" : "HDX"));
4355 sc->sc_tbi_linkup = 1;
4356 }
4357
4358 if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
4359 && ((status & STATUS_LU) == 0)) {
4360 sc->sc_tbi_linkup = 0;
4361 if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
4362 /* RXCFG storm! */
4363 DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
4364 sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
4365 wm_init(ifp);
4366 wm_start(ifp);
4367 } else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4368 /* If the timer expired, retry autonegotiation */
4369 if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
4370 DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
4371 sc->sc_tbi_ticks = 0;
4372 /*
4373 * Reset the link, and let autonegotiation do
4374 * its thing
4375 */
4376 sc->sc_ctrl |= CTRL_LRST;
4377 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4378 delay(1000);
4379 sc->sc_ctrl &= ~CTRL_LRST;
4380 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4381 delay(1000);
4382 CSR_WRITE(sc, WMREG_TXCW,
4383 sc->sc_txcw & ~TXCW_ANE);
4384 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4385 }
4386 }
4387 }
4388
4389 wm_tbi_set_linkled(sc);
4390 }
4391
4392 /*
4393 * wm_gmii_reset:
4394 *
4395 * Reset the PHY.
4396 */
4397 static void
4398 wm_gmii_reset(struct wm_softc *sc)
4399 {
4400 uint32_t reg;
4401 int func = 0; /* XXX gcc */
4402
4403 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4404 || (sc->sc_type == WM_T_ICH10)) {
4405 if (wm_get_swfwhw_semaphore(sc)) {
4406 aprint_error_dev(sc->sc_dev,
4407 "%s: failed to get semaphore\n", __func__);
4408 return;
4409 }
4410 }
4411 if (sc->sc_type == WM_T_80003) {
4412 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4413 if (wm_get_swfw_semaphore(sc,
4414 func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4415 aprint_error_dev(sc->sc_dev,
4416 "%s: failed to get semaphore\n", __func__);
4417 return;
4418 }
4419 }
4420 if (sc->sc_type >= WM_T_82544) {
4421 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4422 delay(20000);
4423
4424 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4425 delay(20000);
4426 } else {
4427 /*
4428 * With 82543, we need to force speed and duplex on the MAC
4429 * equal to what the PHY speed and duplex configuration is.
4430 		 * In addition, we need to pulse the PHY's external reset
4431 		 * pin to bring it out of reset.
4432 */
4433 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4434 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4435
4436 /* The PHY reset pin is active-low. */
4437 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4438 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4439 CTRL_EXT_SWDPIN(4));
4440 reg |= CTRL_EXT_SWDPIO(4);
4441
4442 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4443 delay(10);
4444
4445 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4446 delay(10000);
4447
4448 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4449 delay(10);
4450 #if 0
4451 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4452 #endif
4453 }
4454 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4455 || (sc->sc_type == WM_T_ICH10))
4456 wm_put_swfwhw_semaphore(sc);
4457 if (sc->sc_type == WM_T_80003)
4458 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4459 }
4460
4461 /*
4462 * wm_gmii_mediainit:
4463 *
4464 * Initialize media for use on 1000BASE-T devices.
4465 */
4466 static void
4467 wm_gmii_mediainit(struct wm_softc *sc)
4468 {
4469 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4470
4471 /* We have MII. */
4472 sc->sc_flags |= WM_F_HAS_MII;
4473
4474 if (sc->sc_type >= WM_T_80003)
4475 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4476 else
4477 sc->sc_tipg = TIPG_1000T_DFLT;
4478
4479 /*
4480 * Let the chip set speed/duplex on its own based on
4481 * signals from the PHY.
4482 * XXXbouyer - I'm not sure this is right for the 80003,
4483 * the em driver only sets CTRL_SLU here - but it seems to work.
4484 */
4485 sc->sc_ctrl |= CTRL_SLU;
4486 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4487
4488 /* Initialize our media structures and probe the GMII. */
4489 sc->sc_mii.mii_ifp = ifp;
4490
4491 if (sc->sc_type == WM_T_ICH10) {
4492 sc->sc_mii.mii_readreg = wm_gmii_bm_readreg;
4493 sc->sc_mii.mii_writereg = wm_gmii_bm_writereg;
4494 } else if (sc->sc_type >= WM_T_80003) {
4495 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4496 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4497 } else if (sc->sc_type >= WM_T_82544) {
4498 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4499 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4500 } else {
4501 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4502 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4503 }
4504 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4505
4506 wm_gmii_reset(sc);
4507
4508 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4509 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4510 wm_gmii_mediastatus);
4511
4512 mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4513 MII_OFFSET_ANY, MIIF_DOPAUSE);
4514 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4515 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4516 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4517 } else
4518 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4519 }
4520
4521 /*
4522 * wm_gmii_mediastatus: [ifmedia interface function]
4523 *
4524 * Get the current interface media status on a 1000BASE-T device.
4525 */
4526 static void
4527 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4528 {
4529 struct wm_softc *sc = ifp->if_softc;
4530
4531 ether_mediastatus(ifp, ifmr);
4532 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4533 sc->sc_flowflags;
4534 }
4535
4536 /*
4537 * wm_gmii_mediachange: [ifmedia interface function]
4538 *
4539 * Set hardware to newly-selected media on a 1000BASE-T device.
4540 */
4541 static int
4542 wm_gmii_mediachange(struct ifnet *ifp)
4543 {
4544 struct wm_softc *sc = ifp->if_softc;
4545 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4546 int rc;
4547
4548 if ((ifp->if_flags & IFF_UP) == 0)
4549 return 0;
4550
4551 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4552 sc->sc_ctrl |= CTRL_SLU;
4553 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4554 || (sc->sc_type > WM_T_82543)) {
4555 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4556 } else {
4557 sc->sc_ctrl &= ~CTRL_ASDE;
4558 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4559 if (ife->ifm_media & IFM_FDX)
4560 sc->sc_ctrl |= CTRL_FD;
4561 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4562 case IFM_10_T:
4563 sc->sc_ctrl |= CTRL_SPEED_10;
4564 break;
4565 case IFM_100_TX:
4566 sc->sc_ctrl |= CTRL_SPEED_100;
4567 break;
4568 case IFM_1000_T:
4569 sc->sc_ctrl |= CTRL_SPEED_1000;
4570 break;
4571 default:
4572 panic("wm_gmii_mediachange: bad media 0x%x",
4573 ife->ifm_media);
4574 }
4575 }
4576 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4577 if (sc->sc_type <= WM_T_82543)
4578 wm_gmii_reset(sc);
4579
4580 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4581 return 0;
4582 return rc;
4583 }
4584
4585 #define MDI_IO CTRL_SWDPIN(2)
4586 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4587 #define MDI_CLK CTRL_SWDPIN(3)
4588
4589 static void
4590 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4591 {
4592 uint32_t i, v;
4593
4594 v = CSR_READ(sc, WMREG_CTRL);
4595 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4596 v |= MDI_DIR | CTRL_SWDPIO(3);
4597
4598 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4599 if (data & i)
4600 v |= MDI_IO;
4601 else
4602 v &= ~MDI_IO;
4603 CSR_WRITE(sc, WMREG_CTRL, v);
4604 delay(10);
4605 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4606 delay(10);
4607 CSR_WRITE(sc, WMREG_CTRL, v);
4608 delay(10);
4609 }
4610 }
4611
4612 static uint32_t
4613 i82543_mii_recvbits(struct wm_softc *sc)
4614 {
4615 uint32_t v, i, data = 0;
4616
4617 v = CSR_READ(sc, WMREG_CTRL);
4618 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4619 v |= CTRL_SWDPIO(3);
4620
4621 CSR_WRITE(sc, WMREG_CTRL, v);
4622 delay(10);
4623 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4624 delay(10);
4625 CSR_WRITE(sc, WMREG_CTRL, v);
4626 delay(10);
4627
4628 for (i = 0; i < 16; i++) {
4629 data <<= 1;
4630 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4631 delay(10);
4632 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4633 data |= 1;
4634 CSR_WRITE(sc, WMREG_CTRL, v);
4635 delay(10);
4636 }
4637
4638 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4639 delay(10);
4640 CSR_WRITE(sc, WMREG_CTRL, v);
4641 delay(10);
4642
4643 return (data);
4644 }
4645
4646 #undef MDI_IO
4647 #undef MDI_DIR
4648 #undef MDI_CLK
4649
4650 /*
4651 * wm_gmii_i82543_readreg: [mii interface function]
4652 *
4653 * Read a PHY register on the GMII (i82543 version).
4654 */
4655 static int
4656 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4657 {
4658 struct wm_softc *sc = device_private(self);
4659 int rv;
4660
4661 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4662 i82543_mii_sendbits(sc, reg | (phy << 5) |
4663 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4664 rv = i82543_mii_recvbits(sc) & 0xffff;
4665
4666 DPRINTF(WM_DEBUG_GMII,
4667 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4668 device_xname(sc->sc_dev), phy, reg, rv));
4669
4670 return (rv);
4671 }
4672
4673 /*
4674 * wm_gmii_i82543_writereg: [mii interface function]
4675 *
4676 * Write a PHY register on the GMII (i82543 version).
4677 */
4678 static void
4679 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4680 {
4681 struct wm_softc *sc = device_private(self);
4682
4683 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4684 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4685 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4686 (MII_COMMAND_START << 30), 32);
4687 }
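
/*
 * For reference: the two functions above clock out a standard IEEE 802.3
 * clause 22 management frame, MSB first.  A read is a 32-bit preamble of
 * ones followed by the 14-bit field
 *
 *	[13:12] ST=01  [11:10] OP=10 (read)  [9:5] PHY  [4:0] REG
 *
 * after which the turnaround and 16 data bits are clocked in from the
 * PHY.  A write sends one 32-bit field: ST, OP=01 (write), PHY, REG,
 * TA=10, then the 16 data bits.
 */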
4688
4689 /*
4690 * wm_gmii_i82544_readreg: [mii interface function]
4691 *
4692 * Read a PHY register on the GMII.
4693 */
4694 static int
4695 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4696 {
4697 struct wm_softc *sc = device_private(self);
4698 uint32_t mdic = 0;
4699 int i, rv;
4700
4701 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4702 MDIC_REGADD(reg));
4703
4704 for (i = 0; i < 320; i++) {
4705 mdic = CSR_READ(sc, WMREG_MDIC);
4706 if (mdic & MDIC_READY)
4707 break;
4708 delay(10);
4709 }
4710
4711 if ((mdic & MDIC_READY) == 0) {
4712 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4713 device_xname(sc->sc_dev), phy, reg);
4714 rv = 0;
4715 } else if (mdic & MDIC_E) {
4716 #if 0 /* This is normal if no PHY is present. */
4717 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4718 device_xname(sc->sc_dev), phy, reg);
4719 #endif
4720 rv = 0;
4721 } else {
4722 rv = MDIC_DATA(mdic);
4723 if (rv == 0xffff)
4724 rv = 0;
4725 }
4726
4727 return (rv);
4728 }
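
/*
 * Usage sketch (not compiled in): with the MDIC-based accessor above,
 * reading a PHY's 32-bit identifier is just two register reads.
 * MII_PHYIDR1/MII_PHYIDR2 are the standard ID registers from
 * <dev/mii/mii.h>.
 */
#if 0
static uint32_t
wm_read_phy_id_example(struct wm_softc *sc, int phy)
{
	uint32_t id;

	id = wm_gmii_i82544_readreg(sc->sc_dev, phy, MII_PHYIDR1) << 16;
	id |= wm_gmii_i82544_readreg(sc->sc_dev, phy, MII_PHYIDR2);

	return id;
}
#endif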
4729
4730 /*
4731 * wm_gmii_i82544_writereg: [mii interface function]
4732 *
4733 * Write a PHY register on the GMII.
4734 */
4735 static void
4736 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4737 {
4738 struct wm_softc *sc = device_private(self);
4739 uint32_t mdic = 0;
4740 int i;
4741
4742 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4743 MDIC_REGADD(reg) | MDIC_DATA(val));
4744
4745 for (i = 0; i < 320; i++) {
4746 mdic = CSR_READ(sc, WMREG_MDIC);
4747 if (mdic & MDIC_READY)
4748 break;
4749 delay(10);
4750 }
4751
4752 if ((mdic & MDIC_READY) == 0)
4753 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4754 device_xname(sc->sc_dev), phy, reg);
4755 else if (mdic & MDIC_E)
4756 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4757 device_xname(sc->sc_dev), phy, reg);
4758 }
4759
4760 /*
4761 * wm_gmii_i80003_readreg: [mii interface function]
4762 *
4763  *	Read a PHY register on the Kumeran interface.
4764  * This could be handled by the PHY layer if we didn't have to lock the
4765  * resource ...
4766 */
4767 static int
4768 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4769 {
4770 struct wm_softc *sc = device_private(self);
4771 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4772 int rv;
4773
4774 if (phy != 1) /* only one PHY on kumeran bus */
4775 return 0;
4776
4777 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4778 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4779 __func__);
4780 return 0;
4781 }
4782
4783 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4784 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4785 reg >> GG82563_PAGE_SHIFT);
4786 } else {
4787 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4788 reg >> GG82563_PAGE_SHIFT);
4789 }
4790 	/* Wait another 200us; works around a bug with the MDIC ready bit. */
4791 delay(200);
4792 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4793 delay(200);
4794
4795 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4796 return (rv);
4797 }
4798
4799 /*
4800 * wm_gmii_i80003_writereg: [mii interface function]
4801 *
4802  *	Write a PHY register on the Kumeran interface.
4803  * This could be handled by the PHY layer if we didn't have to lock the
4804  * resource ...
4805 */
4806 static void
4807 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4808 {
4809 struct wm_softc *sc = device_private(self);
4810 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4811
4812 if (phy != 1) /* only one PHY on kumeran bus */
4813 return;
4814
4815 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4816 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4817 __func__);
4818 return;
4819 }
4820
4821 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4822 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4823 reg >> GG82563_PAGE_SHIFT);
4824 } else {
4825 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4826 reg >> GG82563_PAGE_SHIFT);
4827 }
4828 	/* Wait another 200us; works around a bug with the MDIC ready bit. */
4829 delay(200);
4830 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4831 delay(200);
4832
4833 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4834 }
4835
4836 /*
4837 * wm_gmii_bm_readreg: [mii interface function]
4838 *
4839  *	Read a PHY register on the BM PHY (used on ICH10).
4840  * This could be handled by the PHY layer if we didn't have to lock the
4841  * resource ...
4842 */
4843 static int
4844 wm_gmii_bm_readreg(device_t self, int phy, int reg)
4845 {
4846 struct wm_softc *sc = device_private(self);
4847 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4848 int rv;
4849
4850 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4851 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4852 __func__);
4853 return 0;
4854 }
4855
4856 	if (reg > GG82563_MAX_REG_ADDRESS) {
4857 		if (phy == 1)
4858 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
4859 		else
4860 			wm_gmii_i82544_writereg(self, phy,
4861 			    GG82563_PHY_PAGE_SELECT,
4862 			    reg >> GG82563_PAGE_SHIFT);
4863 	}
4865
4866 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4867 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4868 return (rv);
4869 }
4870
4871 /*
4872 * wm_gmii_bm_writereg: [mii interface function]
4873 *
4874  *	Write a PHY register on the BM PHY (used on ICH10).
4875  * This could be handled by the PHY layer if we didn't have to lock the
4876  * resource ...
4877 */
4878 static void
4879 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
4880 {
4881 struct wm_softc *sc = device_private(self);
4882 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4883
4884 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4885 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4886 __func__);
4887 return;
4888 }
4889
4890 	if (reg > GG82563_MAX_REG_ADDRESS) {
4891 		if (phy == 1)
4892 			wm_gmii_i82544_writereg(self, phy, 0x1f, reg);
4893 		else
4894 			wm_gmii_i82544_writereg(self, phy,
4895 			    GG82563_PHY_PAGE_SELECT,
4896 			    reg >> GG82563_PAGE_SHIFT);
4897 	}
4899
4900 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4901 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4902 }
4903
4904 /*
4905 * wm_gmii_statchg: [mii interface function]
4906 *
4907 * Callback from MII layer when media changes.
4908 */
4909 static void
4910 wm_gmii_statchg(device_t self)
4911 {
4912 struct wm_softc *sc = device_private(self);
4913 struct mii_data *mii = &sc->sc_mii;
4914
4915 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4916 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4917 sc->sc_fcrtl &= ~FCRTL_XONE;
4918
4919 /*
4920 * Get flow control negotiation result.
4921 */
4922 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4923 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4924 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4925 mii->mii_media_active &= ~IFM_ETH_FMASK;
4926 }
4927
4928 if (sc->sc_flowflags & IFM_FLOW) {
4929 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4930 sc->sc_ctrl |= CTRL_TFCE;
4931 sc->sc_fcrtl |= FCRTL_XONE;
4932 }
4933 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4934 sc->sc_ctrl |= CTRL_RFCE;
4935 }
4936
4937 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4938 DPRINTF(WM_DEBUG_LINK,
4939 ("%s: LINK: statchg: FDX\n", device_xname(sc->sc_dev)));
4940 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4941 } else {
4942 DPRINTF(WM_DEBUG_LINK,
4943 ("%s: LINK: statchg: HDX\n", device_xname(sc->sc_dev)));
4944 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4945 }
4946
4947 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4948 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4949 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4950 : WMREG_FCRTL, sc->sc_fcrtl);
4951 if (sc->sc_type >= WM_T_80003) {
4952 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4953 case IFM_1000_T:
4954 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4955 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4956 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4957 break;
4958 default:
4959 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4960 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4961 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4962 break;
4963 }
4964 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4965 }
4966 }
4967
4968 /*
4969 * wm_kmrn_i80003_readreg:
4970 *
4971  *	Read a Kumeran register.
4972 */
4973 static int
4974 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4975 {
4976 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4977 int rv;
4978
4979 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
4980 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
4981 __func__);
4982 return 0;
4983 }
4984
4985 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4986 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4987 KUMCTRLSTA_REN);
4988 delay(2);
4989
4990 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4991 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4992 return (rv);
4993 }
4994
4995 /*
4996 * wm_kmrn_i80003_writereg:
4997 *
4998  *	Write a Kumeran register.
4999 */
5000 static void
5001 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
5002 {
5003 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
5004
5005 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM)) {
5006 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5007 __func__);
5008 return;
5009 }
5010
5011 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
5012 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
5013 (val & KUMCTRLSTA_MASK));
5014 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
5015 }
5016
5017 static int
5018 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
5019 {
5020 uint32_t eecd = 0;
5021
5022 if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574) {
5023 eecd = CSR_READ(sc, WMREG_EECD);
5024
5025 /* Isolate bits 15 & 16 */
5026 eecd = ((eecd >> 15) & 0x03);
5027
5028 /* If both bits are set, device is Flash type */
5029 if (eecd == 0x03) {
5030 return 0;
5031 }
5032 }
5033 return 1;
5034 }
5035
5036 static int
5037 wm_get_swsm_semaphore(struct wm_softc *sc)
5038 {
5039 int32_t timeout;
5040 uint32_t swsm;
5041
5042 /* Get the FW semaphore. */
5043 timeout = 1000 + 1; /* XXX */
5044 while (timeout) {
5045 swsm = CSR_READ(sc, WMREG_SWSM);
5046 swsm |= SWSM_SWESMBI;
5047 CSR_WRITE(sc, WMREG_SWSM, swsm);
5048 /* if we managed to set the bit we got the semaphore. */
5049 swsm = CSR_READ(sc, WMREG_SWSM);
5050 if (swsm & SWSM_SWESMBI)
5051 break;
5052
5053 delay(50);
5054 timeout--;
5055 }
5056
5057 if (timeout == 0) {
5058 		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
5059 /* Release semaphores */
5060 wm_put_swsm_semaphore(sc);
5061 return 1;
5062 }
5063 return 0;
5064 }
5065
5066 static void
5067 wm_put_swsm_semaphore(struct wm_softc *sc)
5068 {
5069 uint32_t swsm;
5070
5071 swsm = CSR_READ(sc, WMREG_SWSM);
5072 swsm &= ~(SWSM_SWESMBI);
5073 CSR_WRITE(sc, WMREG_SWSM, swsm);
5074 }
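
/*
 * The pair above implements the usual set-and-verify hardware semaphore:
 * software writes SWSM_SWESMBI and reads it back, and the bit only sticks
 * when no other agent (e.g. firmware) holds the semaphore.  A minimal
 * sketch of the calling pattern:
 */
#if 0
	if (wm_get_swsm_semaphore(sc) == 0) {
		/* ... access the shared EEPROM/NVM registers ... */
		wm_put_swsm_semaphore(sc);
	}
#endif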
5075
5076 static int
5077 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5078 {
5079 uint32_t swfw_sync;
5080 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
5081 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
5082 	int timeout;
5083 
5084 	for (timeout = 0; timeout < 200; timeout++) {
5085 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5086 if (wm_get_swsm_semaphore(sc)) {
5087 aprint_error_dev(sc->sc_dev,
5088 "%s: failed to get semaphore\n",
5089 __func__);
5090 return 1;
5091 }
5092 }
5093 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5094 if ((swfw_sync & (swmask | fwmask)) == 0) {
5095 swfw_sync |= swmask;
5096 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5097 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5098 wm_put_swsm_semaphore(sc);
5099 return 0;
5100 }
5101 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5102 wm_put_swsm_semaphore(sc);
5103 delay(5000);
5104 }
5105 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
5106 device_xname(sc->sc_dev), mask, swfw_sync);
5107 return 1;
5108 }
5109
5110 static void
5111 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
5112 {
5113 uint32_t swfw_sync;
5114
5115 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5116 while (wm_get_swsm_semaphore(sc) != 0)
5117 continue;
5118 }
5119 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
5120 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
5121 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
5122 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5123 wm_put_swsm_semaphore(sc);
5124 }
5125
5126 static int
5127 wm_get_swfwhw_semaphore(struct wm_softc *sc)
5128 {
5129 uint32_t ext_ctrl;
5130 	int timeout;
5131 
5132 	for (timeout = 0; timeout < 200; timeout++) {
5133 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5134 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
5135 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5136
5137 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5138 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
5139 return 0;
5140 delay(5000);
5141 }
5142 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
5143 device_xname(sc->sc_dev), ext_ctrl);
5144 return 1;
5145 }
5146
5147 static void
5148 wm_put_swfwhw_semaphore(struct wm_softc *sc)
5149 {
5150 uint32_t ext_ctrl;
5151 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
5152 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
5153 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
5154 }
5155
5156 static int
5157 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
5158 {
5159 uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
5160 uint8_t bank_high_byte;
5161 uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
5162
5163 if (sc->sc_type != WM_T_ICH10) {
5164 /* Value of bit 22 corresponds to the flash bank we're on. */
5165 *bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
5166 } else {
5167 wm_read_ich8_byte(sc, act_offset, &bank_high_byte);
5168 if ((bank_high_byte & 0xc0) == 0x80)
5169 *bank = 0;
5170 else {
5171 wm_read_ich8_byte(sc, act_offset + bank1_offset,
5172 &bank_high_byte);
5173 if ((bank_high_byte & 0xc0) == 0x80)
5174 *bank = 1;
5175 else {
5176 aprint_error_dev(sc->sc_dev,
5177 "EEPROM not present\n");
5178 return -1;
5179 }
5180 }
5181 }
5182
5183 return 0;
5184 }
5185
5186 /******************************************************************************
5187 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
5188 * register.
5189 *
5190 * sc - Struct containing variables accessed by shared code
5191  * offset - offset of the first word in the EEPROM to read
5192  * words - number of words to read
5193  * data - words read from the EEPROM
5194 *****************************************************************************/
5195 static int
5196 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
5197 {
5198 int32_t error = 0;
5199 uint32_t flash_bank = 0;
5200 uint32_t act_offset = 0;
5201 uint32_t bank_offset = 0;
5202 uint16_t word = 0;
5203 uint16_t i = 0;
5204
5205 /* We need to know which is the valid flash bank. In the event
5206 * that we didn't allocate eeprom_shadow_ram, we may not be
5207 * managing flash_bank. So it cannot be trusted and needs
5208 * to be updated with each read.
5209 */
5210 error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
5211 if (error) {
5212 aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
5213 __func__);
5214 return error;
5215 }
5216
5217 	/* Compute the bank offset in bytes; the bank size is in words. */
5218 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
5219
5220 error = wm_get_swfwhw_semaphore(sc);
5221 if (error) {
5222 aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5223 __func__);
5224 return error;
5225 }
5226
5227 for (i = 0; i < words; i++) {
5228 /* The NVM part needs a byte offset, hence * 2 */
5229 act_offset = bank_offset + ((offset + i) * 2);
5230 error = wm_read_ich8_word(sc, act_offset, &word);
5231 if (error) {
5232 aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
5233 __func__);
5234 break;
5235 }
5236 data[i] = word;
5237 }
5238
5239 wm_put_swfwhw_semaphore(sc);
5240 return error;
5241 }
5242
5243 /******************************************************************************
5244 * This function does initial flash setup so that a new read/write/erase cycle
5245 * can be started.
5246 *
5247 * sc - The pointer to the hw structure
5248 ****************************************************************************/
5249 static int32_t
5250 wm_ich8_cycle_init(struct wm_softc *sc)
5251 {
5252 uint16_t hsfsts;
5253 int32_t error = 1;
5254 int32_t i = 0;
5255
5256 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5257
5258 	/* Check the Flash Descriptor Valid bit in HW status. */
5259 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
5260 return error;
5261 }
5262
5263 	/* Clear FCERR and DAEL in HW status by writing a 1 to each. */
5265 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
5266
5267 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5268
5269 	/* Either we should have a hardware SPI cycle-in-progress bit to check
5270 	 * against in order to start a new cycle, or the FDONE bit should be
5271 	 * changed in the hardware so that it is 1 after hardware reset, which
5272 	 * can then be used as an indication of whether a cycle is in progress
5273 	 * or has been completed.  We should also have some software semaphore
5274 	 * mechanism to guard FDONE or the cycle-in-progress bit so that two
5275 	 * threads' accesses to those bits are serialized, or some way so that
5276 	 * two threads don't start the cycle at the same time. */
5277
5278 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5279 /* There is no cycle running at present, so we can start a cycle */
5280 /* Begin by setting Flash Cycle Done. */
5281 hsfsts |= HSFSTS_DONE;
5282 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5283 error = 0;
5284 } else {
5285 		/* Otherwise poll for some time so the current cycle has a
5286 		 * chance to end before giving up. */
5287 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5288 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5289 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5290 error = 0;
5291 break;
5292 }
5293 delay(1);
5294 }
5295 if (error == 0) {
5296 			/* The previous cycle finished in time; now set
5297 			 * the Flash Cycle Done bit. */
5298 hsfsts |= HSFSTS_DONE;
5299 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5300 }
5301 }
5302 return error;
5303 }
5304
5305 /******************************************************************************
5306 * This function starts a flash cycle and waits for its completion
5307 *
5308 * sc - The pointer to the hw structure
5309 ****************************************************************************/
5310 static int32_t
5311 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5312 {
5313 uint16_t hsflctl;
5314 uint16_t hsfsts;
5315 int32_t error = 1;
5316 uint32_t i = 0;
5317
5318 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5319 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5320 hsflctl |= HSFCTL_GO;
5321 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5322
5323 /* wait till FDONE bit is set to 1 */
5324 do {
5325 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5326 if (hsfsts & HSFSTS_DONE)
5327 break;
5328 delay(1);
5329 i++;
5330 } while (i < timeout);
5331 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5332 error = 0;
5333 }
5334 return error;
5335 }
5336
5337 /******************************************************************************
5338 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5339 *
5340 * sc - The pointer to the hw structure
5341 * index - The index of the byte or word to read.
5342 * size - Size of data to read, 1=byte 2=word
5343 * data - Pointer to the word to store the value read.
5344 *****************************************************************************/
5345 static int32_t
5346 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5347 uint32_t size, uint16_t* data)
5348 {
5349 uint16_t hsfsts;
5350 uint16_t hsflctl;
5351 uint32_t flash_linear_address;
5352 uint32_t flash_data = 0;
5353 int32_t error = 1;
5354 int32_t count = 0;
5355
5356 	if (size < 1 || size > 2 || data == NULL ||
5357 index > ICH_FLASH_LINEAR_ADDR_MASK)
5358 return error;
5359
5360 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5361 sc->sc_ich8_flash_base;
5362
5363 do {
5364 delay(1);
5365 /* Steps */
5366 error = wm_ich8_cycle_init(sc);
5367 if (error)
5368 break;
5369
5370 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5371 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5372 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5373 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5374 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5375
5376 /* Write the last 24 bits of index into Flash Linear address field in
5377 * Flash Address */
5378 /* TODO: TBD maybe check the index against the size of flash */
5379
5380 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5381
5382 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5383
5384 		/* If FCERR is set, clear it and retry the whole sequence a few
5385 		 * more times; otherwise read the result out of Flash Data0,
5386 		 * least significant byte first. */
5387 if (error == 0) {
5388 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5389 if (size == 1) {
5390 *data = (uint8_t)(flash_data & 0x000000FF);
5391 } else if (size == 2) {
5392 *data = (uint16_t)(flash_data & 0x0000FFFF);
5393 }
5394 break;
5395 } else {
5396 /* If we've gotten here, then things are probably completely hosed,
5397 * but if the error condition is detected, it won't hurt to give
5398 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5399 */
5400 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5401 if (hsfsts & HSFSTS_ERR) {
5402 /* Repeat for some time before giving up. */
5403 continue;
5404 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5405 break;
5406 }
5407 }
5408 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5409
5410 return error;
5411 }
5412
5413 /******************************************************************************
5414 * Reads a single byte from the NVM using the ICH8 flash access registers.
5415 *
5416 * sc - pointer to wm_hw structure
5417 * index - The index of the byte to read.
5418 * data - Pointer to a byte to store the value read.
5419 *****************************************************************************/
5420 static int32_t
5421 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5422 {
5423 int32_t status;
5424 uint16_t word = 0;
5425
5426 status = wm_read_ich8_data(sc, index, 1, &word);
5427 if (status == 0) {
5428 *data = (uint8_t)word;
5429 }
5430
5431 return status;
5432 }
5433
5434 /******************************************************************************
5435 * Reads a word from the NVM using the ICH8 flash access registers.
5436 *
5437 * sc - pointer to wm_hw structure
5438 * index - The starting byte index of the word to read.
5439 * data - Pointer to a word to store the value read.
5440 *****************************************************************************/
5441 static int32_t
5442 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5443 {
5444 int32_t status;
5445
5446 status = wm_read_ich8_data(sc, index, 2, data);
5447 return status;
5448 }
5449
5450 static int
5451 wm_check_mng_mode(struct wm_softc *sc)
5452 {
5453 int rv;
5454
5455 switch (sc->sc_type) {
5456 case WM_T_ICH8:
5457 case WM_T_ICH9:
5458 case WM_T_ICH10:
5459 rv = wm_check_mng_mode_ich8lan(sc);
5460 break;
5461 #if 0
5462 case WM_T_82574:
5463 /*
5464 		 * The function is provided in the em driver, but it's not
5465 * used. Why?
5466 */
5467 rv = wm_check_mng_mode_82574(sc);
5468 break;
5469 #endif
5470 case WM_T_82571:
5471 case WM_T_82572:
5472 case WM_T_82573:
5473 case WM_T_80003:
5474 rv = wm_check_mng_mode_generic(sc);
5475 break;
5476 default:
5477 		/* nothing to do */
5478 rv = 0;
5479 break;
5480 }
5481
5482 return rv;
5483 }
5484
5485 static int
5486 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
5487 {
5488 uint32_t fwsm;
5489
5490 fwsm = CSR_READ(sc, WMREG_FWSM);
5491
5492 if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
5493 return 1;
5494
5495 return 0;
5496 }
5497
5498 #if 0
5499 static int
5500 wm_check_mng_mode_82574(struct wm_softc *sc)
5501 {
5502 uint16_t data;
5503
5504 wm_read_eeprom(sc, NVM_INIT_CONTROL2_REG, 1, &data);
5505
5506 if ((data & NVM_INIT_CTRL2_MNGM) != 0)
5507 return 1;
5508
5509 return 0;
5510 }
5511 #endif
5512
5513 static int
5514 wm_check_mng_mode_generic(struct wm_softc *sc)
5515 {
5516 uint32_t fwsm;
5517
5518 fwsm = CSR_READ(sc, WMREG_FWSM);
5519
5520 if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
5521 return 1;
5522
5523 return 0;
5524 }
5525
5526 static void
5527 wm_get_hw_control(struct wm_softc *sc)
5528 {
5529 uint32_t reg;
5530
5531 switch (sc->sc_type) {
5532 case WM_T_82573:
5533 #if 0
5534 case WM_T_82574:
5535 /*
5536 	 * FreeBSD's em driver has a function for the 82574 that checks
5537 	 * the management mode, but it's not used. Why?
5538 */
5539 #endif
5540 reg = CSR_READ(sc, WMREG_SWSM);
5541 CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
5542 break;
5543 case WM_T_82571:
5544 case WM_T_82572:
5545 case WM_T_80003:
5546 case WM_T_ICH8:
5547 case WM_T_ICH9:
5548 case WM_T_ICH10:
5549 reg = CSR_READ(sc, WMREG_CTRL_EXT);
5550 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
5551 break;
5552 default:
5553 break;
5554 }
5555 }
5556
5557 /* XXX Currently TBI only */
5558 static int
5559 wm_check_for_link(struct wm_softc *sc)
5560 {
5561 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5562 uint32_t rxcw;
5563 uint32_t ctrl;
5564 uint32_t status;
5565 uint32_t sig;
5566
5567 rxcw = CSR_READ(sc, WMREG_RXCW);
5568 ctrl = CSR_READ(sc, WMREG_CTRL);
5569 status = CSR_READ(sc, WMREG_STATUS);
5570
5571 sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
5572
5573 DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
5574 device_xname(sc->sc_dev), __func__,
5575 ((ctrl & CTRL_SWDPIN(1)) == sig),
5576 ((status & STATUS_LU) != 0),
5577 ((rxcw & RXCW_C) != 0)
5578 ));
5579
5580 /*
5581 * SWDPIN LU RXCW
5582 * 0 0 0
5583 * 0 0 1 (should not happen)
5584 * 0 1 0 (should not happen)
5585 * 0 1 1 (should not happen)
5586 * 1 0 0 Disable autonego and force linkup
5587 * 1 0 1 got /C/ but not linkup yet
5588 * 1 1 0 (linkup)
5589 * 1 1 1 If IFM_AUTO, back to autonego
5590 *
5591 */
5592 if (((ctrl & CTRL_SWDPIN(1)) == sig)
5593 && ((status & STATUS_LU) == 0)
5594 && ((rxcw & RXCW_C) == 0)) {
5595 DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
5596 __func__));
5597 sc->sc_tbi_linkup = 0;
5598 /* Disable auto-negotiation in the TXCW register */
5599 CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
5600
5601 /*
5602 * Force link-up and also force full-duplex.
5603 *
5604 		 * NOTE: the hardware updates TFCE and RFCE in CTRL
5605 		 * automatically, so we should keep sc->sc_ctrl in sync.
5606 */
5607 sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
5608 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5609 	} else if (((status & STATUS_LU) != 0)
5610 && ((rxcw & RXCW_C) != 0)
5611 && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
5612 sc->sc_tbi_linkup = 1;
5613 DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
5614 __func__));
5615 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5616 CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
5617 } else if (((ctrl & CTRL_SWDPIN(1)) == sig)
5618 && ((rxcw & RXCW_C) != 0)) {
5619 DPRINTF(WM_DEBUG_LINK, ("/C/"));
5620 } else {
5621 DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
5622 status));
5623 }
5624
5625 return 0;
5626 }
5627