/*	$NetBSD: if_wm.c,v 1.143 2007/08/26 22:45:58 dyoung Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.143 2007/08/26 22:45:58 dyoung Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
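
/*
 * Note: because the ring and queue sizes above are powers of two, the
 * "& (size - 1)" masks are a division-free equivalent of "% size";
 * e.g. with a 4096-entry ring, WM_NEXTTX(sc, 4095) wraps to 0.
 */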

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
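
/*
 * Note: these are byte offsets of a descriptor within the control-data
 * clump; added to the clump's single DMA segment address (sc_cddma),
 * they yield that descriptor's bus address, e.g. the WM_CDTXADDR()
 * macro below is sc_cddma + WM_CDTXOFF(x).
 */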

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
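
/*
 * Note: sc_rxtailp always points at the m_next field of the last mbuf
 * in the receive chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in constant time without walking the chain.
 */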

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
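
/*
 * Note: CSR_WRITE_FLUSH() is a read on purpose; reading STATUS back
 * forces any posted (write-buffered) register writes out to the chip.
 */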

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
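
/*
 * Note: the _LO/_HI pairs split a descriptor's bus address into the
 * two 32-bit halves the chip's 64-bit base-address register pairs
 * expect; when bus_addr_t is 32 bits wide the high half is always 0
 * and the conditional folds away at compile time.
 */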

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
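
/*
 * Illustrative example: with a 256-entry ring, WM_CDTXSYNC(sc, 250, 10,
 * ops) issues two bus_dmamap_sync() calls, one covering descriptors
 * 250..255 and a second covering descriptors 0..3.
 */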

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
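
/*
 * Note: the I/O BAR is an indirect window pair; the register offset is
 * written at +0 and the register data is then read or written at +4,
 * which is what the two helpers above implement.
 */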

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}
	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * Apparently the i8254x doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if (sc->sc_type != WM_T_ICH8)
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type == WM_T_ICH8) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
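		/*
		 * A worked reading of the computation above (this note
		 * assumes GFPREG holds base/limit fields in flash-sector
		 * units, per the masks used): (limit + 1 - base) sectors,
		 * scaled by the sector size, gives the region's size in
		 * bytes; dividing by 2 banks and then by sizeof(uint16_t)
		 * leaves the size of one bank in 16-bit words.
		 */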
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
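		/*
		 * The EEPROM stores the station address as three
		 * little-endian 16-bit words; unpack it a byte at a
		 * time, low byte first.
		 */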
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
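	/*
	 * (Illustrative example, not from the original source.)  The
	 * second function's address differs from the first only in the
	 * low bit: if function 0 reads ...:40 from the EEPROM, the XOR
	 * above makes function 1 report ...:41.
	 */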

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shut down during reboot.
	 */
1606 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1607 if (sc->sc_sdhook == NULL)
1608 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1609 sc->sc_dev.dv_xname);
1610
1611 sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
1612 wm_powerhook, sc);
1613 if (sc->sc_powerhook == NULL)
1614 aprint_error("%s: can't establish powerhook\n",
1615 sc->sc_dev.dv_xname);
1616 return;
1617
1618 /*
1619 * Free any resources we've allocated during the failed attach
1620 * attempt. Do this in reverse order and fall through.
1621 */
1622 fail_5:
1623 for (i = 0; i < WM_NRXDESC; i++) {
1624 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1625 bus_dmamap_destroy(sc->sc_dmat,
1626 sc->sc_rxsoft[i].rxs_dmamap);
1627 }
1628 fail_4:
1629 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1630 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1631 bus_dmamap_destroy(sc->sc_dmat,
1632 sc->sc_txsoft[i].txs_dmamap);
1633 }
1634 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1635 fail_3:
1636 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1637 fail_2:
1638 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1639 cdata_size);
1640 fail_1:
1641 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1642 fail_0:
1643 return;
1644 }
1645
1646 /*
1647 * wm_shutdown:
1648 *
1649 * Make sure the interface is stopped at reboot time.
1650 */
1651 static void
1652 wm_shutdown(void *arg)
1653 {
1654 struct wm_softc *sc = arg;
1655
1656 wm_stop(&sc->sc_ethercom.ec_if, 1);
1657 }
1658
1659 static void
1660 wm_powerhook(int why, void *arg)
1661 {
1662 struct wm_softc *sc = arg;
1663 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1664 pci_chipset_tag_t pc = sc->sc_pc;
1665 pcitag_t tag = sc->sc_pcitag;
1666
1667 switch (why) {
1668 case PWR_SOFTSUSPEND:
1669 wm_shutdown(sc);
1670 break;
1671 case PWR_SOFTRESUME:
1672 ifp->if_flags &= ~IFF_RUNNING;
1673 wm_init(ifp);
1674 if (ifp->if_flags & IFF_RUNNING)
1675 wm_start(ifp);
1676 break;
1677 case PWR_SUSPEND:
1678 pci_conf_capture(pc, tag, &sc->sc_pciconf);
1679 break;
1680 case PWR_RESUME:
1681 pci_conf_restore(pc, tag, &sc->sc_pciconf);
1682 break;
1683 }
1684
1685 return;
1686 }
1687
1688 /*
1689 * wm_tx_offload:
1690 *
1691 * Set up TCP/IP checksumming parameters for the
1692 * specified packet.
1693 */
1694 static int
1695 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1696 uint8_t *fieldsp)
1697 {
1698 struct mbuf *m0 = txs->txs_mbuf;
1699 struct livengood_tcpip_ctxdesc *t;
1700 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1701 uint32_t ipcse;
1702 struct ether_header *eh;
1703 int offset, iphl;
1704 uint8_t fields;
1705
1706 /*
1707 * XXX It would be nice if the mbuf pkthdr had offset
1708 * fields for the protocol headers.
1709 */
1710
1711 eh = mtod(m0, struct ether_header *);
1712 switch (htons(eh->ether_type)) {
1713 case ETHERTYPE_IP:
1714 case ETHERTYPE_IPV6:
1715 offset = ETHER_HDR_LEN;
1716 break;
1717
1718 case ETHERTYPE_VLAN:
1719 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1720 break;
1721
1722 default:
1723 /*
1724 * Don't support this protocol or encapsulation.
1725 */
1726 *fieldsp = 0;
1727 *cmdp = 0;
1728 return (0);
1729 }
1730
1731 if ((m0->m_pkthdr.csum_flags &
1732 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1733 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1734 } else {
1735 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1736 }
1737 ipcse = offset + iphl - 1;
1738
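/*
 * Note: "cmd" seeds the command word handed back for the data
 * descriptors (DEXT + data descriptor type), while "cmdlen"
 * becomes the command/length word of the context descriptor
 * written out below.
 */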
1739 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1740 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1741 seg = 0;
1742 fields = 0;
1743
1744 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1745 int hlen = offset + iphl;
1746 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1747
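/*
 * For TSO, both paths below zero the IP length field and seed
 * the TCP checksum with a zero-length pseudo-header sum; the
 * chip fills in the per-segment lengths and finishes the
 * checksums as it carves the packet into frames.
 */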
1748 if (__predict_false(m0->m_len <
1749 (hlen + sizeof(struct tcphdr)))) {
1750 /*
1751 * TCP/IP headers are not in the first mbuf; we need
1752 * to do this the slow and painful way. Let's just
1753 * hope this doesn't happen very often.
1754 */
1755 struct tcphdr th;
1756
1757 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1758
1759 m_copydata(m0, hlen, sizeof(th), &th);
1760 if (v4) {
1761 struct ip ip;
1762
1763 m_copydata(m0, offset, sizeof(ip), &ip);
1764 ip.ip_len = 0;
1765 m_copyback(m0,
1766 offset + offsetof(struct ip, ip_len),
1767 sizeof(ip.ip_len), &ip.ip_len);
1768 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1769 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1770 } else {
1771 struct ip6_hdr ip6;
1772
1773 m_copydata(m0, offset, sizeof(ip6), &ip6);
1774 ip6.ip6_plen = 0;
1775 m_copyback(m0,
1776 offset + offsetof(struct ip6_hdr, ip6_plen),
1777 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1778 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1779 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1780 }
1781 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1782 sizeof(th.th_sum), &th.th_sum);
1783
1784 hlen += th.th_off << 2;
1785 } else {
1786 /*
1787 * TCP/IP headers are in the first mbuf; we can do
1788 * this the easy way.
1789 */
1790 struct tcphdr *th;
1791
1792 if (v4) {
1793 struct ip *ip =
1794 (void *)(mtod(m0, char *) + offset);
1795 th = (void *)(mtod(m0, char *) + hlen);
1796
1797 ip->ip_len = 0;
1798 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1799 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1800 } else {
1801 struct ip6_hdr *ip6 =
1802 (void *)(mtod(m0, char *) + offset);
1803 th = (void *)(mtod(m0, char *) + hlen);
1804
1805 ip6->ip6_plen = 0;
1806 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1807 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1808 }
1809 hlen += th->th_off << 2;
1810 }
1811
1812 if (v4) {
1813 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1814 cmdlen |= WTX_TCPIP_CMD_IP;
1815 } else {
1816 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1817 ipcse = 0;
1818 }
1819 cmd |= WTX_TCPIP_CMD_TSE;
1820 cmdlen |= WTX_TCPIP_CMD_TSE |
1821 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1822 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1823 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1824 }
1825
1826 /*
1827 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1828 * offload feature, if we load the context descriptor, we
1829 * MUST provide valid values for IPCSS and TUCSS fields.
1830 */
1831
1832 ipcs = WTX_TCPIP_IPCSS(offset) |
1833 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1834 WTX_TCPIP_IPCSE(ipcse);
1835 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1836 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1837 fields |= WTX_IXSM;
1838 }
1839
1840 offset += iphl;
1841
1842 if (m0->m_pkthdr.csum_flags &
1843 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1844 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1845 fields |= WTX_TXSM;
1846 tucs = WTX_TCPIP_TUCSS(offset) |
1847 WTX_TCPIP_TUCSO(offset +
1848 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1849 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1850 } else if ((m0->m_pkthdr.csum_flags &
1851 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1852 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1853 fields |= WTX_TXSM;
1854 tucs = WTX_TCPIP_TUCSS(offset) |
1855 WTX_TCPIP_TUCSO(offset +
1856 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1857 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1858 } else {
1859 /* Just initialize it to a valid TCP context. */
1860 tucs = WTX_TCPIP_TUCSS(offset) |
1861 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1862 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1863 }
1864
1865 /* Fill in the context descriptor. */
1866 t = (struct livengood_tcpip_ctxdesc *)
1867 &sc->sc_txdescs[sc->sc_txnext];
1868 t->tcpip_ipcs = htole32(ipcs);
1869 t->tcpip_tucs = htole32(tucs);
1870 t->tcpip_cmdlen = htole32(cmdlen);
1871 t->tcpip_seg = htole32(seg);
1872 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1873
1874 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1875 txs->txs_ndesc++;
1876
1877 *cmdp = cmd;
1878 *fieldsp = fields;
1879
1880 return (0);
1881 }
1882
1883 static void
1884 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1885 {
1886 struct mbuf *m;
1887 int i;
1888
1889 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
1890 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1891 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1892 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
1893 m->m_data, m->m_len, m->m_flags);
1894 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
1895 i, i == 1 ? "" : "s");
1896 }
1897
1898 /*
1899 * wm_82547_txfifo_stall:
1900 *
1901 * Callout used to wait for the 82547 Tx FIFO to drain,
1902 * reset the FIFO pointers, and restart packet transmission.
1903 */
1904 static void
1905 wm_82547_txfifo_stall(void *arg)
1906 {
1907 struct wm_softc *sc = arg;
1908 int s;
1909
1910 s = splnet();
1911
1912 if (sc->sc_txfifo_stall) {
1913 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1914 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1915 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1916 /*
1917 * Packets have drained. Stop transmitter, reset
1918 * FIFO pointers, restart transmitter, and kick
1919 * the packet queue.
1920 */
1921 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1922 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1923 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1924 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1925 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1926 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1927 CSR_WRITE(sc, WMREG_TCTL, tctl);
1928 CSR_WRITE_FLUSH(sc);
1929
1930 sc->sc_txfifo_head = 0;
1931 sc->sc_txfifo_stall = 0;
1932 wm_start(&sc->sc_ethercom.ec_if);
1933 } else {
1934 /*
1935 * Still waiting for packets to drain; try again in
1936 * another tick.
1937 */
1938 callout_schedule(&sc->sc_txfifo_ch, 1);
1939 }
1940 }
1941
1942 splx(s);
1943 }
1944
1945 /*
1946 * wm_82547_txfifo_bugchk:
1947 *
1948 * Check for bug condition in the 82547 Tx FIFO. We need to
1949 * prevent enqueueing a packet that would wrap around the end
1950 * of the Tx FIFO ring buffer; otherwise the chip will croak.
1951 *
1952 * We do this by checking the amount of space before the end
1953 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1954 * the Tx FIFO, wait for all remaining packets to drain, reset
1955 * the internal FIFO pointers to the beginning, and restart
1956 * transmission on the interface.
1957 */
1958 #define WM_FIFO_HDR 0x10
1959 #define WM_82547_PAD_LEN 0x3e0
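/*
 * Example with illustrative numbers: with 0x200 bytes of space
 * left before the end of the FIFO, a 1518-byte frame rounds up
 * to 0x600 bytes including the FIFO header, and since
 * 0x600 >= 0x3e0 + 0x200 we stall rather than let it wrap.
 */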
1960 static int
1961 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1962 {
1963 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1964 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1965
1966 /* Just return if already stalled. */
1967 if (sc->sc_txfifo_stall)
1968 return (1);
1969
1970 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1971 /* Stall only occurs in half-duplex mode. */
1972 goto send_packet;
1973 }
1974
1975 if (len >= WM_82547_PAD_LEN + space) {
1976 sc->sc_txfifo_stall = 1;
1977 callout_schedule(&sc->sc_txfifo_ch, 1);
1978 return (1);
1979 }
1980
1981 send_packet:
1982 sc->sc_txfifo_head += len;
1983 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1984 sc->sc_txfifo_head -= sc->sc_txfifo_size;
1985
1986 return (0);
1987 }
1988
1989 /*
1990 * wm_start: [ifnet interface function]
1991 *
1992 * Start packet transmission on the interface.
1993 */
1994 static void
1995 wm_start(struct ifnet *ifp)
1996 {
1997 struct wm_softc *sc = ifp->if_softc;
1998 struct mbuf *m0;
1999 #if 0 /* XXXJRT */
2000 struct m_tag *mtag;
2001 #endif
2002 struct wm_txsoft *txs;
2003 bus_dmamap_t dmamap;
2004 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2005 bus_addr_t curaddr;
2006 bus_size_t seglen, curlen;
2007 uint32_t cksumcmd;
2008 uint8_t cksumfields;
2009
2010 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2011 return;
2012
2013 /*
2014 * Remember the previous number of free descriptors.
2015 */
2016 ofree = sc->sc_txfree;
2017
2018 /*
2019 * Loop through the send queue, setting up transmit descriptors
2020 * until we drain the queue, or use up all available transmit
2021 * descriptors.
2022 */
2023 for (;;) {
2024 /* Grab a packet off the queue. */
2025 IFQ_POLL(&ifp->if_snd, m0);
2026 if (m0 == NULL)
2027 break;
2028
2029 DPRINTF(WM_DEBUG_TX,
2030 ("%s: TX: have packet to transmit: %p\n",
2031 sc->sc_dev.dv_xname, m0));
2032
2033 /* Get a work queue entry. */
2034 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2035 wm_txintr(sc);
2036 if (sc->sc_txsfree == 0) {
2037 DPRINTF(WM_DEBUG_TX,
2038 ("%s: TX: no free job descriptors\n",
2039 sc->sc_dev.dv_xname));
2040 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2041 break;
2042 }
2043 }
2044
2045 txs = &sc->sc_txsoft[sc->sc_txsnext];
2046 dmamap = txs->txs_dmamap;
2047
2048 use_tso = (m0->m_pkthdr.csum_flags &
2049 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2050
2051 /*
2052 * So says the Linux driver:
2053 * The controller does a simple calculation to make sure
2054 * there is enough room in the FIFO before initiating the
2055 * DMA for each buffer. The calc is:
2056 * 4 = ceil(buffer len / MSS)
2057 * To make sure we don't overrun the FIFO, adjust the max
2058 * buffer len if the MSS drops.
2059 */
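/*
 * Example with a typical 1460-byte MSS: the per-buffer limit
 * becomes 4 * 1460 = 5840 bytes (assuming that is below
 * WTX_MAX_LEN).
 */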
2060 dmamap->dm_maxsegsz =
2061 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2062 ? m0->m_pkthdr.segsz << 2
2063 : WTX_MAX_LEN;
2064
2065 /*
2066 * Load the DMA map. If this fails, the packet either
2067 * didn't fit in the allotted number of segments, or we
2068 * were short on resources. For the too-many-segments
2069 * case, we simply report an error and drop the packet,
2070 * since we can't sanely copy a jumbo packet to a single
2071 * buffer.
2072 */
2073 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2074 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2075 if (error) {
2076 if (error == EFBIG) {
2077 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2078 log(LOG_ERR, "%s: Tx packet consumes too many "
2079 "DMA segments, dropping...\n",
2080 sc->sc_dev.dv_xname);
2081 IFQ_DEQUEUE(&ifp->if_snd, m0);
2082 wm_dump_mbuf_chain(sc, m0);
2083 m_freem(m0);
2084 continue;
2085 }
2086 /*
2087 * Short on resources, just stop for now.
2088 */
2089 DPRINTF(WM_DEBUG_TX,
2090 ("%s: TX: dmamap load failed: %d\n",
2091 sc->sc_dev.dv_xname, error));
2092 break;
2093 }
2094
2095 segs_needed = dmamap->dm_nsegs;
2096 if (use_tso) {
2097 /* For sentinel descriptor; see below. */
2098 segs_needed++;
2099 }
2100
2101 /*
2102 * Ensure we have enough descriptors free to describe
2103 * the packet. Note, we always reserve one descriptor
2104 * at the end of the ring due to the semantics of the
2105 * TDT register, plus one more in the event we need
2106 * to load offload context.
2107 */
2108 if (segs_needed > sc->sc_txfree - 2) {
2109 /*
2110 * Not enough free descriptors to transmit this
2111 * packet. We haven't committed anything yet,
2112 * so just unload the DMA map, put the packet
2113 * back on the queue, and punt. Notify the upper
2114 * layer that there are no more slots left.
2115 */
2116 DPRINTF(WM_DEBUG_TX,
2117 ("%s: TX: need %d (%d) descriptors, have %d\n",
2118 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
2119 sc->sc_txfree - 1));
2120 ifp->if_flags |= IFF_OACTIVE;
2121 bus_dmamap_unload(sc->sc_dmat, dmamap);
2122 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2123 break;
2124 }
2125
2126 /*
2127 * Check for 82547 Tx FIFO bug. We need to do this
2128 * once we know we can transmit the packet, since we
2129 * do some internal FIFO space accounting here.
2130 */
2131 if (sc->sc_type == WM_T_82547 &&
2132 wm_82547_txfifo_bugchk(sc, m0)) {
2133 DPRINTF(WM_DEBUG_TX,
2134 ("%s: TX: 82547 Tx FIFO bug detected\n",
2135 sc->sc_dev.dv_xname));
2136 ifp->if_flags |= IFF_OACTIVE;
2137 bus_dmamap_unload(sc->sc_dmat, dmamap);
2138 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2139 break;
2140 }
2141
2142 IFQ_DEQUEUE(&ifp->if_snd, m0);
2143
2144 /*
2145 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2146 */
2147
2148 DPRINTF(WM_DEBUG_TX,
2149 ("%s: TX: packet has %d (%d) DMA segments\n",
2150 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
2151
2152 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2153
2154 /*
2155 * Store a pointer to the packet so that we can free it
2156 * later.
2157 *
2158 * Initially, we consider the number of descriptors the
2159 * packet uses to be the number of DMA segments. This may be
2160 * incremented by 1 if we do checksum offload (a descriptor
2161 * is used to set the checksum context).
2162 */
2163 txs->txs_mbuf = m0;
2164 txs->txs_firstdesc = sc->sc_txnext;
2165 txs->txs_ndesc = segs_needed;
2166
2167 /* Set up offload parameters for this packet. */
2168 if (m0->m_pkthdr.csum_flags &
2169 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2170 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2171 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2172 if (wm_tx_offload(sc, txs, &cksumcmd,
2173 &cksumfields) != 0) {
2174 /* Error message already displayed. */
2175 bus_dmamap_unload(sc->sc_dmat, dmamap);
2176 continue;
2177 }
2178 } else {
2179 cksumcmd = 0;
2180 cksumfields = 0;
2181 }
2182
2183 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2184
2185 /* Sync the DMA map. */
2186 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2187 BUS_DMASYNC_PREWRITE);
2188
2189 /*
2190 * Initialize the transmit descriptor.
2191 */
2192 for (nexttx = sc->sc_txnext, seg = 0;
2193 seg < dmamap->dm_nsegs; seg++) {
2194 for (seglen = dmamap->dm_segs[seg].ds_len,
2195 curaddr = dmamap->dm_segs[seg].ds_addr;
2196 seglen != 0;
2197 curaddr += curlen, seglen -= curlen,
2198 nexttx = WM_NEXTTX(sc, nexttx)) {
2199 curlen = seglen;
2200
2201 /*
2202 * So says the Linux driver:
2203 * Work around for premature descriptor
2204 * write-backs in TSO mode. Append a
2205 * 4-byte sentinel descriptor.
2206 */
2207 if (use_tso &&
2208 seg == dmamap->dm_nsegs - 1 &&
2209 curlen > 8)
2210 curlen -= 4;
2211
2212 wm_set_dma_addr(
2213 &sc->sc_txdescs[nexttx].wtx_addr,
2214 curaddr);
2215 sc->sc_txdescs[nexttx].wtx_cmdlen =
2216 htole32(cksumcmd | curlen);
2217 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2218 0;
2219 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2220 cksumfields;
2221 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2222 lasttx = nexttx;
2223
2224 DPRINTF(WM_DEBUG_TX,
2225 ("%s: TX: desc %d: low 0x%08lx, "
2226 "len 0x%04x\n",
2227 sc->sc_dev.dv_xname, nexttx,
2228 curaddr & 0xffffffffUL, (unsigned)curlen));
2229 }
2230 }
2231
2232 KASSERT(lasttx != -1);
2233
2234 /*
2235 * Set up the command byte on the last descriptor of
2236 * the packet. If we're in the interrupt delay window,
2237 * delay the interrupt.
2238 */
2239 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2240 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2241
2242 #if 0 /* XXXJRT */
2243 /*
2244 * If VLANs are enabled and the packet has a VLAN tag, set
2245 * up the descriptor to encapsulate the packet for us.
2246 *
2247 * This is only valid on the last descriptor of the packet.
2248 */
2249 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2250 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2251 htole32(WTX_CMD_VLE);
2252 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2253 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2254 }
2255 #endif /* XXXJRT */
2256
2257 txs->txs_lastdesc = lasttx;
2258
2259 DPRINTF(WM_DEBUG_TX,
2260 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
2261 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2262
2263 /* Sync the descriptors we're using. */
2264 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2265 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2266
2267 /* Give the packet to the chip. */
2268 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2269
2270 DPRINTF(WM_DEBUG_TX,
2271 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
2272
2273 DPRINTF(WM_DEBUG_TX,
2274 ("%s: TX: finished transmitting packet, job %d\n",
2275 sc->sc_dev.dv_xname, sc->sc_txsnext));
2276
2277 /* Advance the tx pointer. */
2278 sc->sc_txfree -= txs->txs_ndesc;
2279 sc->sc_txnext = nexttx;
2280
2281 sc->sc_txsfree--;
2282 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2283
2284 #if NBPFILTER > 0
2285 /* Pass the packet to any BPF listeners. */
2286 if (ifp->if_bpf)
2287 bpf_mtap(ifp->if_bpf, m0);
2288 #endif /* NBPFILTER > 0 */
2289 }
2290
2291 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2292 /* No more slots; notify upper layer. */
2293 ifp->if_flags |= IFF_OACTIVE;
2294 }
2295
2296 if (sc->sc_txfree != ofree) {
2297 /* Set a watchdog timer in case the chip flakes out. */
2298 ifp->if_timer = 5;
2299 }
2300 }
2301
2302 /*
2303 * wm_watchdog: [ifnet interface function]
2304 *
2305 * Watchdog timer handler.
2306 */
2307 static void
2308 wm_watchdog(struct ifnet *ifp)
2309 {
2310 struct wm_softc *sc = ifp->if_softc;
2311
2312 /*
2313 * Since we're using delayed interrupts, sweep up
2314 * before we report an error.
2315 */
2316 wm_txintr(sc);
2317
2318 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2319 log(LOG_ERR,
2320 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2321 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
2322 sc->sc_txnext);
2323 ifp->if_oerrors++;
2324
2325 /* Reset the interface. */
2326 (void) wm_init(ifp);
2327 }
2328
2329 /* Try to get more packets going. */
2330 wm_start(ifp);
2331 }
2332
2333 /*
2334 * wm_ioctl: [ifnet interface function]
2335 *
2336 * Handle control requests from the operator.
2337 */
2338 static int
2339 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2340 {
2341 struct wm_softc *sc = ifp->if_softc;
2342 struct ifreq *ifr = (struct ifreq *) data;
2343 int s, error;
2344
2345 s = splnet();
2346
2347 switch (cmd) {
2348 case SIOCSIFMEDIA:
2349 case SIOCGIFMEDIA:
2350 /* Flow control requires full-duplex mode. */
2351 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2352 (ifr->ifr_media & IFM_FDX) == 0)
2353 ifr->ifr_media &= ~IFM_ETH_FMASK;
2354 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2355 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2356 /* We can do both TXPAUSE and RXPAUSE. */
2357 ifr->ifr_media |=
2358 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2359 }
2360 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2361 }
2362 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2363 break;
2364 default:
2365 error = ether_ioctl(ifp, cmd, data);
2366 if (error == ENETRESET) {
2367 /*
2368 * Multicast list has changed; set the hardware filter
2369 * accordingly.
2370 */
2371 if (ifp->if_flags & IFF_RUNNING)
2372 wm_set_filter(sc);
2373 error = 0;
2374 }
2375 break;
2376 }
2377
2378 /* Try to get more packets going. */
2379 wm_start(ifp);
2380
2381 splx(s);
2382 return (error);
2383 }
2384
2385 /*
2386 * wm_intr:
2387 *
2388 * Interrupt service routine.
2389 */
2390 static int
2391 wm_intr(void *arg)
2392 {
2393 struct wm_softc *sc = arg;
2394 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2395 uint32_t icr;
2396 int handled = 0;
2397
2398 while (1 /* CONSTCOND */) {
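/* Reading ICR also acknowledges (clears) the bits it returns. */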
2399 icr = CSR_READ(sc, WMREG_ICR);
2400 if ((icr & sc->sc_icr) == 0)
2401 break;
2402 #if 0 /*NRND > 0*/
2403 if (RND_ENABLED(&sc->rnd_source))
2404 rnd_add_uint32(&sc->rnd_source, icr);
2405 #endif
2406
2407 handled = 1;
2408
2409 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2410 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2411 DPRINTF(WM_DEBUG_RX,
2412 ("%s: RX: got Rx intr 0x%08x\n",
2413 sc->sc_dev.dv_xname,
2414 icr & (ICR_RXDMT0|ICR_RXT0)));
2415 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2416 }
2417 #endif
2418 wm_rxintr(sc);
2419
2420 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2421 if (icr & ICR_TXDW) {
2422 DPRINTF(WM_DEBUG_TX,
2423 ("%s: TX: got TXDW interrupt\n",
2424 sc->sc_dev.dv_xname));
2425 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2426 }
2427 #endif
2428 wm_txintr(sc);
2429
2430 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2431 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2432 wm_linkintr(sc, icr);
2433 }
2434
2435 if (icr & ICR_RXO) {
2436 ifp->if_ierrors++;
2437 #if defined(WM_DEBUG)
2438 log(LOG_WARNING, "%s: Receive overrun\n",
2439 sc->sc_dev.dv_xname);
2440 #endif /* defined(WM_DEBUG) */
2441 }
2442 }
2443
2444 if (handled) {
2445 /* Try to get more packets going. */
2446 wm_start(ifp);
2447 }
2448
2449 return (handled);
2450 }
2451
2452 /*
2453 * wm_txintr:
2454 *
2455 * Helper; handle transmit interrupts.
2456 */
2457 static void
2458 wm_txintr(struct wm_softc *sc)
2459 {
2460 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2461 struct wm_txsoft *txs;
2462 uint8_t status;
2463 int i;
2464
2465 ifp->if_flags &= ~IFF_OACTIVE;
2466
2467 /*
2468 * Go through the Tx list and free mbufs for those
2469 * frames which have been transmitted.
2470 */
2471 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2472 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2473 txs = &sc->sc_txsoft[i];
2474
2475 DPRINTF(WM_DEBUG_TX,
2476 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
2477
2478 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2479 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2480
2481 status =
2482 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
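/*
 * Jobs complete in ring order, so if this descriptor's done
 * (DD) bit is still clear, later jobs can't be finished either;
 * hand the descriptor back to the chip and stop scanning.
 */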
2483 if ((status & WTX_ST_DD) == 0) {
2484 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2485 BUS_DMASYNC_PREREAD);
2486 break;
2487 }
2488
2489 DPRINTF(WM_DEBUG_TX,
2490 ("%s: TX: job %d done: descs %d..%d\n",
2491 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
2492 txs->txs_lastdesc));
2493
2494 /*
2495 * XXX We should probably be using the statistics
2496 * XXX registers, but I don't know if they exist
2497 * XXX on chips before the i82544.
2498 */
2499
2500 #ifdef WM_EVENT_COUNTERS
2501 if (status & WTX_ST_TU)
2502 WM_EVCNT_INCR(&sc->sc_ev_tu);
2503 #endif /* WM_EVENT_COUNTERS */
2504
2505 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2506 ifp->if_oerrors++;
2507 if (status & WTX_ST_LC)
2508 log(LOG_WARNING, "%s: late collision\n",
2509 sc->sc_dev.dv_xname);
2510 else if (status & WTX_ST_EC) {
2511 ifp->if_collisions += 16;
2512 log(LOG_WARNING, "%s: excessive collisions\n",
2513 sc->sc_dev.dv_xname);
2514 }
2515 } else
2516 ifp->if_opackets++;
2517
2518 sc->sc_txfree += txs->txs_ndesc;
2519 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2520 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2521 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2522 m_freem(txs->txs_mbuf);
2523 txs->txs_mbuf = NULL;
2524 }
2525
2526 /* Update the dirty transmit buffer pointer. */
2527 sc->sc_txsdirty = i;
2528 DPRINTF(WM_DEBUG_TX,
2529 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2530
2531 /*
2532 * If there are no more pending transmissions, cancel the watchdog
2533 * timer.
2534 */
2535 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2536 ifp->if_timer = 0;
2537 }
2538
2539 /*
2540 * wm_rxintr:
2541 *
2542 * Helper; handle receive interrupts.
2543 */
2544 static void
2545 wm_rxintr(struct wm_softc *sc)
2546 {
2547 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2548 struct wm_rxsoft *rxs;
2549 struct mbuf *m;
2550 int i, len;
2551 uint8_t status, errors;
2552
2553 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2554 rxs = &sc->sc_rxsoft[i];
2555
2556 DPRINTF(WM_DEBUG_RX,
2557 ("%s: RX: checking descriptor %d\n",
2558 sc->sc_dev.dv_xname, i));
2559
2560 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2561
2562 status = sc->sc_rxdescs[i].wrx_status;
2563 errors = sc->sc_rxdescs[i].wrx_errors;
2564 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2565
2566 if ((status & WRX_ST_DD) == 0) {
2567 /*
2568 * We have processed all of the receive descriptors.
2569 */
2570 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2571 break;
2572 }
2573
2574 if (__predict_false(sc->sc_rxdiscard)) {
2575 DPRINTF(WM_DEBUG_RX,
2576 ("%s: RX: discarding contents of descriptor %d\n",
2577 sc->sc_dev.dv_xname, i));
2578 WM_INIT_RXDESC(sc, i);
2579 if (status & WRX_ST_EOP) {
2580 /* Reset our state. */
2581 DPRINTF(WM_DEBUG_RX,
2582 ("%s: RX: resetting rxdiscard -> 0\n",
2583 sc->sc_dev.dv_xname));
2584 sc->sc_rxdiscard = 0;
2585 }
2586 continue;
2587 }
2588
2589 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2590 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2591
2592 m = rxs->rxs_mbuf;
2593
2594 /*
2595 * Add a new receive buffer to the ring, unless of
2596 * course the length is zero. Treat the latter as a
2597 * failed mapping.
2598 */
2599 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2600 /*
2601 * Failed, throw away what we've done so
2602 * far, and discard the rest of the packet.
2603 */
2604 ifp->if_ierrors++;
2605 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2606 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2607 WM_INIT_RXDESC(sc, i);
2608 if ((status & WRX_ST_EOP) == 0)
2609 sc->sc_rxdiscard = 1;
2610 if (sc->sc_rxhead != NULL)
2611 m_freem(sc->sc_rxhead);
2612 WM_RXCHAIN_RESET(sc);
2613 DPRINTF(WM_DEBUG_RX,
2614 ("%s: RX: Rx buffer allocation failed, "
2615 "dropping packet%s\n", sc->sc_dev.dv_xname,
2616 sc->sc_rxdiscard ? " (discard)" : ""));
2617 continue;
2618 }
2619
2620 WM_RXCHAIN_LINK(sc, m);
2621
2622 m->m_len = len;
2623
2624 DPRINTF(WM_DEBUG_RX,
2625 ("%s: RX: buffer at %p len %d\n",
2626 sc->sc_dev.dv_xname, m->m_data, len));
2627
2628 /*
2629 * If this is not the end of the packet, keep
2630 * looking.
2631 */
2632 if ((status & WRX_ST_EOP) == 0) {
2633 sc->sc_rxlen += len;
2634 DPRINTF(WM_DEBUG_RX,
2635 ("%s: RX: not yet EOP, rxlen -> %d\n",
2636 sc->sc_dev.dv_xname, sc->sc_rxlen));
2637 continue;
2638 }
2639
2640 /*
2641 * Okay, we have the entire packet now. The chip is
2642 * configured to include the FCS (not all chips can
2643 * be configured to strip it), so we need to trim it.
2644 */
2645 m->m_len -= ETHER_CRC_LEN;
2646
2647 *sc->sc_rxtailp = NULL;
2648 len = m->m_len + sc->sc_rxlen;
2649 m = sc->sc_rxhead;
2650
2651 WM_RXCHAIN_RESET(sc);
2652
2653 DPRINTF(WM_DEBUG_RX,
2654 ("%s: RX: have entire packet, len -> %d\n",
2655 sc->sc_dev.dv_xname, len));
2656
2657 /*
2658 * If an error occurred, update stats and drop the packet.
2659 */
2660 if (errors &
2661 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2662 ifp->if_ierrors++;
2663 if (errors & WRX_ER_SE)
2664 log(LOG_WARNING, "%s: symbol error\n",
2665 sc->sc_dev.dv_xname);
2666 else if (errors & WRX_ER_SEQ)
2667 log(LOG_WARNING, "%s: receive sequence error\n",
2668 sc->sc_dev.dv_xname);
2669 else if (errors & WRX_ER_CE)
2670 log(LOG_WARNING, "%s: CRC error\n",
2671 sc->sc_dev.dv_xname);
2672 m_freem(m);
2673 continue;
2674 }
2675
2676 /*
2677 * No errors. Receive the packet.
2678 */
2679 m->m_pkthdr.rcvif = ifp;
2680 m->m_pkthdr.len = len;
2681
2682 #if 0 /* XXXJRT */
2683 /*
2684 * If VLANs are enabled, VLAN packets have been unwrapped
2685 * for us. Associate the tag with the packet.
2686 */
2687 if ((status & WRX_ST_VP) != 0) {
2688 VLAN_INPUT_TAG(ifp, m,
2689 le16toh(sc->sc_rxdescs[i].wrx_special),
2690 continue);
2691 }
2692 #endif /* XXXJRT */
2693
2694 /*
2695 * Set up checksum info for this packet.
2696 */
2697 if ((status & WRX_ST_IXSM) == 0) {
2698 if (status & WRX_ST_IPCS) {
2699 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2700 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2701 if (errors & WRX_ER_IPE)
2702 m->m_pkthdr.csum_flags |=
2703 M_CSUM_IPv4_BAD;
2704 }
2705 if (status & WRX_ST_TCPCS) {
2706 /*
2707 * Note: we don't know if this was TCP or UDP,
2708 * so we just set both bits, and expect the
2709 * upper layers to deal.
2710 */
2711 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2712 m->m_pkthdr.csum_flags |=
2713 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2714 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2715 if (errors & WRX_ER_TCPE)
2716 m->m_pkthdr.csum_flags |=
2717 M_CSUM_TCP_UDP_BAD;
2718 }
2719 }
2720
2721 ifp->if_ipackets++;
2722
2723 #if NBPFILTER > 0
2724 /* Pass this up to any BPF listeners. */
2725 if (ifp->if_bpf)
2726 bpf_mtap(ifp->if_bpf, m);
2727 #endif /* NBPFILTER > 0 */
2728
2729 /* Pass it on. */
2730 (*ifp->if_input)(ifp, m);
2731 }
2732
2733 /* Update the receive pointer. */
2734 sc->sc_rxptr = i;
2735
2736 DPRINTF(WM_DEBUG_RX,
2737 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2738 }
2739
2740 /*
2741 * wm_linkintr:
2742 *
2743 * Helper; handle link interrupts.
2744 */
2745 static void
2746 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2747 {
2748 uint32_t status;
2749
2750 /*
2751 * If we get a link status interrupt on a 1000BASE-T
2752 * device, just fall into the normal MII tick path.
2753 */
2754 if (sc->sc_flags & WM_F_HAS_MII) {
2755 if (icr & ICR_LSC) {
2756 DPRINTF(WM_DEBUG_LINK,
2757 ("%s: LINK: LSC -> mii_tick\n",
2758 sc->sc_dev.dv_xname));
2759 mii_tick(&sc->sc_mii);
2760 } else if (icr & ICR_RXSEQ) {
2761 DPRINTF(WM_DEBUG_LINK,
2762 ("%s: LINK Receive sequence error\n",
2763 sc->sc_dev.dv_xname));
2764 }
2765 return;
2766 }
2767
2768 /*
2769 * If we are now receiving /C/, check for link again in
2770 * a couple of link clock ticks.
2771 */
2772 if (icr & ICR_RXCFG) {
2773 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2774 sc->sc_dev.dv_xname));
2775 sc->sc_tbi_anstate = 2;
2776 }
2777
2778 if (icr & ICR_LSC) {
2779 status = CSR_READ(sc, WMREG_STATUS);
2780 if (status & STATUS_LU) {
2781 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2782 sc->sc_dev.dv_xname,
2783 (status & STATUS_FD) ? "FDX" : "HDX"));
2784 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2785 sc->sc_fcrtl &= ~FCRTL_XONE;
2786 if (status & STATUS_FD)
2787 sc->sc_tctl |=
2788 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2789 else
2790 sc->sc_tctl |=
2791 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2792 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2793 sc->sc_fcrtl |= FCRTL_XONE;
2794 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2795 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2796 WMREG_OLD_FCRTL : WMREG_FCRTL,
2797 sc->sc_fcrtl);
2798 sc->sc_tbi_linkup = 1;
2799 } else {
2800 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2801 sc->sc_dev.dv_xname));
2802 sc->sc_tbi_linkup = 0;
2803 }
2804 sc->sc_tbi_anstate = 2;
2805 wm_tbi_set_linkled(sc);
2806 } else if (icr & ICR_RXSEQ) {
2807 DPRINTF(WM_DEBUG_LINK,
2808 ("%s: LINK: Receive sequence error\n",
2809 sc->sc_dev.dv_xname));
2810 }
2811 }
2812
2813 /*
2814 * wm_tick:
2815 *
2816 * One second timer, used to check link status, sweep up
2817 * completed transmit jobs, etc.
2818 */
2819 static void
2820 wm_tick(void *arg)
2821 {
2822 struct wm_softc *sc = arg;
2823 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2824 int s;
2825
2826 s = splnet();
2827
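/*
 * The statistics registers read below clear themselves on
 * read, so accumulating them once per tick never double-counts.
 */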
2828 if (sc->sc_type >= WM_T_82542_2_1) {
2829 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2830 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2831 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2832 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2833 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2834 }
2835
2836 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2837 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2838
2840 if (sc->sc_flags & WM_F_HAS_MII)
2841 mii_tick(&sc->sc_mii);
2842 else
2843 wm_tbi_check_link(sc);
2844
2845 splx(s);
2846
2847 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2848 }
2849
2850 /*
2851 * wm_reset:
2852 *
2853 * Reset the i82542 chip.
2854 */
2855 static void
2856 wm_reset(struct wm_softc *sc)
2857 {
2858 int i;
2859
2860 /*
2861 * Allocate on-chip memory according to the MTU size.
2862 * The Packet Buffer Allocation register must be written
2863 * before the chip is reset.
2864 */
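/*
 * Example (82547 with a standard MTU): 30K of the 40K packet
 * buffer goes to receive, leaving a 10K Tx FIFO whose geometry
 * the 82547 FIFO bug workaround tracks.
 */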
2865 switch (sc->sc_type) {
2866 case WM_T_82547:
2867 case WM_T_82547_2:
2868 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2869 PBA_22K : PBA_30K;
2870 sc->sc_txfifo_head = 0;
2871 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2872 sc->sc_txfifo_size =
2873 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2874 sc->sc_txfifo_stall = 0;
2875 break;
2876 case WM_T_82571:
2877 case WM_T_82572:
2878 case WM_T_80003:
2879 sc->sc_pba = PBA_32K;
2880 break;
2881 case WM_T_82573:
2882 sc->sc_pba = PBA_12K;
2883 break;
2884 case WM_T_ICH8:
2885 sc->sc_pba = PBA_8K;
2886 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2887 break;
2888 default:
2889 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2890 PBA_40K : PBA_48K;
2891 break;
2892 }
2893 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2894
2895 /*
2896 * 82541 Errata 29? & 82547 Errata 28?
2897 * See also the description about PHY_RST bit in CTRL register
2898 * in 8254x_GBe_SDM.pdf.
2899 */
2900 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2901 CSR_WRITE(sc, WMREG_CTRL,
2902 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2903 delay(5000);
2904 }
2905
2906 switch (sc->sc_type) {
2907 case WM_T_82544:
2908 case WM_T_82540:
2909 case WM_T_82545:
2910 case WM_T_82546:
2911 case WM_T_82541:
2912 case WM_T_82541_2:
2913 /*
2914 * On some chipsets, a reset through a memory-mapped write
2915 * cycle can cause the chip to reset before completing the
2916 * write cycle. This causes a major headache that can be
2917 * avoided by issuing the reset via indirect register writes
2918 * through I/O space.
2919 *
2920 * So, if we successfully mapped the I/O BAR at attach time,
2921 * use that. Otherwise, try our luck with a memory-mapped
2922 * reset.
2923 */
2924 if (sc->sc_flags & WM_F_IOH_VALID)
2925 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2926 else
2927 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2928 break;
2929
2930 case WM_T_82545_3:
2931 case WM_T_82546_3:
2932 /* Use the shadow control register on these chips. */
2933 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2934 break;
2935
2936 case WM_T_ICH8:
2937 wm_get_swfwhw_semaphore(sc);
2938 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
break;
2939
2940 default:
2941 /* Everything else can safely use the documented method. */
2942 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2943 break;
2944 }
2945 delay(10000);
2946
2947 for (i = 0; i < 1000; i++) {
2948 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2949 break;
2950 delay(20);
2951 }
2952
2953 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2954 log(LOG_ERR, "%s: reset failed to complete\n",
2955 sc->sc_dev.dv_xname);
2956
2957 if (sc->sc_type >= WM_T_80003) {
2958 /* wait for eeprom to reload */
2959 for (i = 1000; i > 0; i--) {
2960 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
2961 break;
2962 }
2963 if (i == 0) {
2964 log(LOG_ERR, "%s: auto read from eeprom failed to "
2965 "complete\n", sc->sc_dev.dv_xname);
2966 }
2967 }
2968 }
2969
2970 /*
2971 * wm_init: [ifnet interface function]
2972 *
2973 * Initialize the interface. Must be called at splnet().
2974 */
2975 static int
2976 wm_init(struct ifnet *ifp)
2977 {
2978 struct wm_softc *sc = ifp->if_softc;
2979 struct wm_rxsoft *rxs;
2980 int i, error = 0;
2981 uint32_t reg;
2982
2983 /*
2984 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2985 * On such platforms there is a small but measurable benefit, at
2986 * normal MTU, to avoiding the adjustment that would align the
2987 * headers. One possibility is that the DMA itself is
2988 * slightly more efficient if the front of the entire packet (instead
2989 * of the front of the headers) is aligned.
2990 *
2991 * Note we must always set align_tweak to 0 if we are using
2992 * jumbo frames.
2993 */
2994 #ifdef __NO_STRICT_ALIGNMENT
2995 sc->sc_align_tweak = 0;
2996 #else
2997 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2998 sc->sc_align_tweak = 0;
2999 else
3000 sc->sc_align_tweak = 2;
3001 #endif /* __NO_STRICT_ALIGNMENT */
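/*
 * With the usual 2-byte tweak, the 14-byte Ethernet header ends
 * on a 4-byte boundary, so the IP header that follows it is
 * naturally aligned.
 */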
3002
3003 /* Cancel any pending I/O. */
3004 wm_stop(ifp, 0);
3005
3006 /* update statistics before reset */
3007 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3008 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3009
3010 /* Reset the chip to a known state. */
3011 wm_reset(sc);
3012
3013 /* Initialize the transmit descriptor ring. */
3014 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3015 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3016 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3017 sc->sc_txfree = WM_NTXDESC(sc);
3018 sc->sc_txnext = 0;
3019
3020 if (sc->sc_type < WM_T_82543) {
3021 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3022 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3023 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3024 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3025 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3026 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3027 } else {
3028 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3029 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3030 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3031 CSR_WRITE(sc, WMREG_TDH, 0);
3032 CSR_WRITE(sc, WMREG_TDT, 0);
3033 CSR_WRITE(sc, WMREG_TIDV, 64);
3034 CSR_WRITE(sc, WMREG_TADV, 128);
3035
3036 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3037 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3038 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3039 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3040 }
3041 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3042 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3043
3044 /* Initialize the transmit job descriptors. */
3045 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3046 sc->sc_txsoft[i].txs_mbuf = NULL;
3047 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3048 sc->sc_txsnext = 0;
3049 sc->sc_txsdirty = 0;
3050
3051 /*
3052 * Initialize the receive descriptor and receive job
3053 * descriptor rings.
3054 */
3055 if (sc->sc_type < WM_T_82543) {
3056 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3057 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3058 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3059 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3060 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3061 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3062
3063 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3064 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3065 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3066 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3067 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3068 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3069 } else {
3070 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3071 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3072 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3073 CSR_WRITE(sc, WMREG_RDH, 0);
3074 CSR_WRITE(sc, WMREG_RDT, 0);
3075 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD);
3076 CSR_WRITE(sc, WMREG_RADV, 128);
3077 }
3078 for (i = 0; i < WM_NRXDESC; i++) {
3079 rxs = &sc->sc_rxsoft[i];
3080 if (rxs->rxs_mbuf == NULL) {
3081 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3082 log(LOG_ERR, "%s: unable to allocate or map rx "
3083 "buffer %d, error = %d\n",
3084 sc->sc_dev.dv_xname, i, error);
3085 /*
3086 * XXX Should attempt to run with fewer receive
3087 * XXX buffers instead of just failing.
3088 */
3089 wm_rxdrain(sc);
3090 goto out;
3091 }
3092 } else
3093 WM_INIT_RXDESC(sc, i);
3094 }
3095 sc->sc_rxptr = 0;
3096 sc->sc_rxdiscard = 0;
3097 WM_RXCHAIN_RESET(sc);
3098
3099 /*
3100 * Clear out the VLAN table -- we don't use it (yet).
3101 */
3102 CSR_WRITE(sc, WMREG_VET, 0);
3103 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3104 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3105
3106 /*
3107 * Set up flow-control parameters.
3108 *
3109 * XXX Values could probably stand some tuning.
3110 */
3111 if (sc->sc_type != WM_T_ICH8) {
3112 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3113 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3114 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3115 }
3116
3117 sc->sc_fcrtl = FCRTL_DFLT;
3118 if (sc->sc_type < WM_T_82543) {
3119 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3120 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3121 } else {
3122 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3123 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3124 }
3125 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3126
3127 #if 0 /* XXXJRT */
3128 /* Deal with VLAN enables. */
3129 if (VLAN_ATTACHED(&sc->sc_ethercom))
3130 sc->sc_ctrl |= CTRL_VME;
3131 else
3132 #endif /* XXXJRT */
3133 sc->sc_ctrl &= ~CTRL_VME;
3134
3135 /* Write the control registers. */
3136 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3137 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3138 int val;
3139 val = CSR_READ(sc, WMREG_CTRL_EXT);
3140 val &= ~CTRL_EXT_LINK_MODE_MASK;
3141 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3142
3143 /* Bypass RX and TX FIFO's */
3144 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3145 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3146 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3147
3148 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3149 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3150 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3151 /*
3152 * Make the MAC wait the maximum time between iterations,
3153 * and increase the maximum number of iterations, when
3154 * polling the PHY; this fixes erroneous timeouts at 10Mbps.
3155 */
3156 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3157 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3158 val |= 0x3F;
3159 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3160 }
3161 #if 0
3162 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3163 #endif
3164
3165 /*
3166 * Set up checksum offload parameters.
3167 */
3168 reg = CSR_READ(sc, WMREG_RXCSUM);
3169 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3170 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3171 reg |= RXCSUM_IPOFL;
3172 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3173 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3174 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3175 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3176 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3177
3178 /*
3179 * Set up the interrupt registers.
3180 */
3181 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3182 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3183 ICR_RXO | ICR_RXT0;
3184 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3185 sc->sc_icr |= ICR_RXCFG;
3186 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3187
3188 /* Set up the inter-packet gap. */
3189 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3190
3191 if (sc->sc_type >= WM_T_82543) {
3192 /* Set up the interrupt throttling register (units of 256ns); this caps the chip at roughly 7000 interrupts/s. */
3193 sc->sc_itr = 1000000000 / (7000 * 256);
3194 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3195 }
3196
3197 #if 0 /* XXXJRT */
3198 /* Set the VLAN ethernetype. */
3199 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3200 #endif
3201
3202 /*
3203 * Set up the transmit control register; we start out with
3204 * a collision distance suitable for FDX, but update it when
3205 * we resolve the media type.
3206 */
3207 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3208 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3209 if (sc->sc_type >= WM_T_82571)
3210 sc->sc_tctl |= TCTL_MULR;
3211 if (sc->sc_type >= WM_T_80003)
3212 sc->sc_tctl |= TCTL_RTLC;
3213 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3214
3215 /* Set the media. */
3216 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
3217
3218 /*
3219 * Set up the receive control register; we actually program
3220 * the register when we set the receive filter. Use multicast
3221 * address offset type 0.
3222 *
3223 * Only the i82544 has the ability to strip the incoming
3224 * CRC, so we don't enable that feature.
3225 */
3226 sc->sc_mchash_type = 0;
3227 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3228 | RCTL_MO(sc->sc_mchash_type);
3229
3230 /* The 82573 and ICH8 don't support jumbo frames */
3231 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3232 sc->sc_rctl |= RCTL_LPE;
3233
3234 if (MCLBYTES == 2048) {
3235 sc->sc_rctl |= RCTL_2k;
3236 } else {
3237 if (sc->sc_type >= WM_T_82543) {
3238 switch(MCLBYTES) {
3239 case 4096:
3240 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3241 break;
3242 case 8192:
3243 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3244 break;
3245 case 16384:
3246 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3247 break;
3248 default:
3249 panic("wm_init: MCLBYTES %d unsupported",
3250 MCLBYTES);
3251 break;
3252 }
3253 } else
panic("wm_init: i82542 requires MCLBYTES = 2048");
3254 }
3255
3256 /* Set the receive filter. */
3257 wm_set_filter(sc);
3258
3259 /* Start the one second link check clock. */
3260 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3261
3262 /* ...all done! */
3263 ifp->if_flags |= IFF_RUNNING;
3264 ifp->if_flags &= ~IFF_OACTIVE;
3265
3266 out:
3267 if (error)
3268 log(LOG_ERR, "%s: interface not running\n",
3269 sc->sc_dev.dv_xname);
3270 return (error);
3271 }
3272
3273 /*
3274 * wm_rxdrain:
3275 *
3276 * Drain the receive queue.
3277 */
3278 static void
3279 wm_rxdrain(struct wm_softc *sc)
3280 {
3281 struct wm_rxsoft *rxs;
3282 int i;
3283
3284 for (i = 0; i < WM_NRXDESC; i++) {
3285 rxs = &sc->sc_rxsoft[i];
3286 if (rxs->rxs_mbuf != NULL) {
3287 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3288 m_freem(rxs->rxs_mbuf);
3289 rxs->rxs_mbuf = NULL;
3290 }
3291 }
3292 }
3293
3294 /*
3295 * wm_stop: [ifnet interface function]
3296 *
3297 * Stop transmission on the interface.
3298 */
3299 static void
3300 wm_stop(struct ifnet *ifp, int disable)
3301 {
3302 struct wm_softc *sc = ifp->if_softc;
3303 struct wm_txsoft *txs;
3304 int i;
3305
3306 /* Stop the one second clock. */
3307 callout_stop(&sc->sc_tick_ch);
3308
3309 /* Stop the 82547 Tx FIFO stall check timer. */
3310 if (sc->sc_type == WM_T_82547)
3311 callout_stop(&sc->sc_txfifo_ch);
3312
3313 if (sc->sc_flags & WM_F_HAS_MII) {
3314 /* Down the MII. */
3315 mii_down(&sc->sc_mii);
3316 }
3317
3318 /* Stop the transmit and receive processes. */
3319 CSR_WRITE(sc, WMREG_TCTL, 0);
3320 CSR_WRITE(sc, WMREG_RCTL, 0);
3321
3322 /*
3323 * Clear the interrupt mask to ensure the device cannot assert its
3324 * interrupt line.
3325 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3326 * any currently pending or shared interrupt.
3327 */
3328 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3329 sc->sc_icr = 0;
3330
3331 /* Release any queued transmit buffers. */
3332 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3333 txs = &sc->sc_txsoft[i];
3334 if (txs->txs_mbuf != NULL) {
3335 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3336 m_freem(txs->txs_mbuf);
3337 txs->txs_mbuf = NULL;
3338 }
3339 }
3340
3341 if (disable)
3342 wm_rxdrain(sc);
3343
3344 /* Mark the interface as down and cancel the watchdog timer. */
3345 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3346 ifp->if_timer = 0;
3347 }
3348
3349 /*
3350 * wm_acquire_eeprom:
3351 *
3352 * Perform the EEPROM handshake required on some chips.
3353 */
3354 static int
3355 wm_acquire_eeprom(struct wm_softc *sc)
3356 {
3357 uint32_t reg;
3358 int x;
3359 int ret = 0;
3360
3361 /* Flash-backed EEPROMs need no handshake; always succeed. */
3362 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3363 return 0;
3364
3365 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3366 ret = wm_get_swfwhw_semaphore(sc);
3367 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3368 /* this will also do wm_get_swsm_semaphore() if needed */
3369 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3370 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3371 ret = wm_get_swsm_semaphore(sc);
3372 }
3373
3374 if (ret)
3375 return 1;
3376
3377 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3378 reg = CSR_READ(sc, WMREG_EECD);
3379
3380 /* Request EEPROM access. */
3381 reg |= EECD_EE_REQ;
3382 CSR_WRITE(sc, WMREG_EECD, reg);
3383
3384 /* ..and wait for it to be granted. */
3385 for (x = 0; x < 1000; x++) {
3386 reg = CSR_READ(sc, WMREG_EECD);
3387 if (reg & EECD_EE_GNT)
3388 break;
3389 delay(5);
3390 }
3391 if ((reg & EECD_EE_GNT) == 0) {
3392 aprint_error("%s: could not acquire EEPROM GNT\n",
3393 sc->sc_dev.dv_xname);
3394 reg &= ~EECD_EE_REQ;
3395 CSR_WRITE(sc, WMREG_EECD, reg);
3396 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3397 wm_put_swfwhw_semaphore(sc);
3398 if (sc->sc_flags & WM_F_SWFW_SYNC)
3399 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3400 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3401 wm_put_swsm_semaphore(sc);
3402 return (1);
3403 }
3404 }
3405
3406 return (0);
3407 }
3408
3409 /*
3410 * wm_release_eeprom:
3411 *
3412 * Release the EEPROM mutex.
3413 */
3414 static void
3415 wm_release_eeprom(struct wm_softc *sc)
3416 {
3417 uint32_t reg;
3418
3419 /* Flash-backed EEPROMs need no handshake; nothing to release. */
3420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3421 return;
3422
3423 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3424 reg = CSR_READ(sc, WMREG_EECD);
3425 reg &= ~EECD_EE_REQ;
3426 CSR_WRITE(sc, WMREG_EECD, reg);
3427 }
3428
3429 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3430 wm_put_swfwhw_semaphore(sc);
3431 if (sc->sc_flags & WM_F_SWFW_SYNC)
3432 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3433 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3434 wm_put_swsm_semaphore(sc);
3435 }
3436
3437 /*
3438 * wm_eeprom_sendbits:
3439 *
3440 * Send a series of bits to the EEPROM.
3441 */
3442 static void
3443 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3444 {
3445 uint32_t reg;
3446 int x;
3447
3448 reg = CSR_READ(sc, WMREG_EECD);
3449
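/*
 * Bits are shifted out MSB first; the serial EEPROM samples DI
 * on the rising edge of each SK pulse generated below.
 */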
3450 for (x = nbits; x > 0; x--) {
3451 if (bits & (1U << (x - 1)))
3452 reg |= EECD_DI;
3453 else
3454 reg &= ~EECD_DI;
3455 CSR_WRITE(sc, WMREG_EECD, reg);
3456 delay(2);
3457 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3458 delay(2);
3459 CSR_WRITE(sc, WMREG_EECD, reg);
3460 delay(2);
3461 }
3462 }
3463
3464 /*
3465 * wm_eeprom_recvbits:
3466 *
3467 * Receive a series of bits from the EEPROM.
3468 */
3469 static void
3470 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3471 {
3472 uint32_t reg, val;
3473 int x;
3474
3475 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3476
3477 val = 0;
3478 for (x = nbits; x > 0; x--) {
3479 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3480 delay(2);
3481 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3482 val |= (1U << (x - 1));
3483 CSR_WRITE(sc, WMREG_EECD, reg);
3484 delay(2);
3485 }
3486 *valp = val;
3487 }
3488
3489 /*
3490 * wm_read_eeprom_uwire:
3491 *
3492 * Read a word from the EEPROM using the MicroWire protocol.
3493 */
3494 static int
3495 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3496 {
3497 uint32_t reg, val;
3498 int i;
3499
3500 for (i = 0; i < wordcnt; i++) {
3501 /* Clear SK and DI. */
3502 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3503 CSR_WRITE(sc, WMREG_EECD, reg);
3504
3505 /* Set CHIP SELECT. */
3506 reg |= EECD_CS;
3507 CSR_WRITE(sc, WMREG_EECD, reg);
3508 delay(2);
3509
3510 /* Shift in the READ command. */
3511 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3512
3513 /* Shift in address. */
3514 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3515
3516 /* Shift out the data. */
3517 wm_eeprom_recvbits(sc, &val, 16);
3518 data[i] = val & 0xffff;
3519
3520 /* Clear CHIP SELECT. */
3521 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3522 CSR_WRITE(sc, WMREG_EECD, reg);
3523 delay(2);
3524 }
3525
3526 return (0);
3527 }
3528
3529 /*
3530 * wm_spi_eeprom_ready:
3531 *
3532 * Wait for a SPI EEPROM to be ready for commands.
3533 */
3534 static int
3535 wm_spi_eeprom_ready(struct wm_softc *sc)
3536 {
3537 uint32_t val;
3538 int usec;
3539
3540 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3541 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3542 wm_eeprom_recvbits(sc, &val, 8);
3543 if ((val & SPI_SR_RDY) == 0)
3544 break;
3545 }
3546 if (usec >= SPI_MAX_RETRIES) {
3547 aprint_error("%s: EEPROM failed to become ready\n",
3548 sc->sc_dev.dv_xname);
3549 return (1);
3550 }
3551 return (0);
3552 }
3553
3554 /*
3555 * wm_read_eeprom_spi:
3556 *
3557 * Read a word from the EEPROM using the SPI protocol.
3558 */
3559 static int
3560 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3561 {
3562 uint32_t reg, val;
3563 int i;
3564 uint8_t opc;
3565
3566 /* Clear SK and CS. */
3567 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3568 CSR_WRITE(sc, WMREG_EECD, reg);
3569 delay(2);
3570
3571 if (wm_spi_eeprom_ready(sc))
3572 return (1);
3573
3574 /* Toggle CS to flush commands. */
3575 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3576 delay(2);
3577 CSR_WRITE(sc, WMREG_EECD, reg);
3578 delay(2);
3579
3580 opc = SPI_OPC_READ;
3581 if (sc->sc_ee_addrbits == 8 && word >= 128)
3582 opc |= SPI_OPC_A8;
3583
3584 wm_eeprom_sendbits(sc, opc, 8);
3585 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3586
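/*
 * The SPI part shifts each word out MSB first, which is
 * byte-swapped relative to the little-endian EEPROM layout;
 * swap the bytes as the words are gathered.
 */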
3587 for (i = 0; i < wordcnt; i++) {
3588 wm_eeprom_recvbits(sc, &val, 16);
3589 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3590 }
3591
3592 /* Raise CS and clear SK. */
3593 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3594 CSR_WRITE(sc, WMREG_EECD, reg);
3595 delay(2);
3596
3597 return (0);
3598 }
3599
3600 #define EEPROM_CHECKSUM 0xBABA
3601 #define EEPROM_SIZE 0x0040
3602
3603 /*
3604 * wm_validate_eeprom_checksum
3605 *
3606 * The checksum is defined so that the first 64 16-bit words sum to EEPROM_CHECKSUM (0xBABA).
3607 */
3608 static int
3609 wm_validate_eeprom_checksum(struct wm_softc *sc)
3610 {
3611 uint16_t checksum;
3612 uint16_t eeprom_data;
3613 int i;
3614
3615 checksum = 0;
3616
3617 for (i = 0; i < EEPROM_SIZE; i++) {
3618 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3619 return 1;
3620 checksum += eeprom_data;
3621 }
3622
3623 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3624 return 1;
3625
3626 return 0;
3627 }
3628
3629 /*
3630 * wm_read_eeprom:
3631 *
3632 * Read data from the serial EEPROM.
3633 */
3634 static int
3635 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3636 {
3637 int rv;
3638
3639 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3640 return 1;
3641
3642 if (wm_acquire_eeprom(sc))
3643 return 1;
3644
3645 if (sc->sc_type == WM_T_ICH8)
3646 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3647 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3648 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3649 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3650 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3651 else
3652 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3653
3654 wm_release_eeprom(sc);
3655 return rv;
3656 }
3657
3658 static int
3659 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3660 uint16_t *data)
3661 {
3662 int i, eerd = 0;
3663 int error = 0;
3664
3665 for (i = 0; i < wordcnt; i++) {
3666 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3667
3668 CSR_WRITE(sc, WMREG_EERD, eerd);
3669 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3670 if (error != 0)
3671 break;
3672
3673 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3674 }
3675
3676 return error;
3677 }
3678
3679 static int
3680 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3681 {
3682 uint32_t attempts = 100000;
3683 uint32_t i, reg = 0;
3684 int32_t done = -1;
3685
3686 for (i = 0; i < attempts; i++) {
3687 reg = CSR_READ(sc, rw);
3688
3689 if (reg & EERD_DONE) {
3690 done = 0;
3691 break;
3692 }
3693 delay(5);
3694 }
3695
3696 return done;
3697 }
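
/*
 * Illustrative only: 100000 attempts with a 5us delay each give the
 * EERD/EEWR cycle up to ~500ms to assert its DONE bit before the
 * poll above reports failure (-1).
 */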
3698
3699 /*
3700 * wm_add_rxbuf:
3701 *
 *	Add a receive buffer to the indicated descriptor.
3703 */
3704 static int
3705 wm_add_rxbuf(struct wm_softc *sc, int idx)
3706 {
3707 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3708 struct mbuf *m;
3709 int error;
3710
3711 MGETHDR(m, M_DONTWAIT, MT_DATA);
3712 if (m == NULL)
3713 return (ENOBUFS);
3714
3715 MCLGET(m, M_DONTWAIT);
3716 if ((m->m_flags & M_EXT) == 0) {
3717 m_freem(m);
3718 return (ENOBUFS);
3719 }
3720
3721 if (rxs->rxs_mbuf != NULL)
3722 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3723
3724 rxs->rxs_mbuf = m;
3725
3726 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3727 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3728 BUS_DMA_READ|BUS_DMA_NOWAIT);
3729 if (error) {
3730 /* XXX XXX XXX */
3731 printf("%s: unable to load rx DMA map %d, error = %d\n",
3732 sc->sc_dev.dv_xname, idx, error);
3733 panic("wm_add_rxbuf");
3734 }
3735
3736 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3737 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3738
3739 WM_INIT_RXDESC(sc, idx);
3740
3741 return (0);
3742 }
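
/*
 * Illustrative only: each receive slot is backed by a full mbuf
 * cluster, and m_len is set to the cluster size (ext_size, typically
 * MCLBYTES) so that WM_INIT_RXDESC hands the chip a buffer large
 * enough for a maximum-sized frame.
 */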
3743
3744 /*
3745 * wm_set_ral:
3746 *
 *	Set an entry in the receive address list.
3748 */
3749 static void
3750 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3751 {
3752 uint32_t ral_lo, ral_hi;
3753
3754 if (enaddr != NULL) {
3755 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3756 (enaddr[3] << 24);
3757 ral_hi = enaddr[4] | (enaddr[5] << 8);
3758 ral_hi |= RAL_AV;
3759 } else {
3760 ral_lo = 0;
3761 ral_hi = 0;
3762 }
3763
3764 if (sc->sc_type >= WM_T_82544) {
3765 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3766 ral_lo);
3767 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3768 ral_hi);
3769 } else {
3770 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3771 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3772 }
3773 }
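
/*
 * Worked example (illustrative only): for station address
 * 00:11:22:33:44:55 in slot 0, the code above produces
 *
 *	ral_lo = 0x33221100;
 *	ral_hi = 0x00005544 | RAL_AV;
 *
 * i.e. the address bytes are packed little-endian across the
 * RAL_LO/RAL_HI pair, and RAL_AV marks the slot valid.
 */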
3774
3775 /*
3776 * wm_mchash:
3777 *
3778 * Compute the hash of the multicast address for the 4096-bit
3779 * multicast filter.
3780 */
3781 static uint32_t
3782 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3783 {
3784 static const int lo_shift[4] = { 4, 3, 2, 0 };
3785 static const int hi_shift[4] = { 4, 5, 6, 8 };
3786 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3787 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3788 uint32_t hash;
3789
3790 if (sc->sc_type == WM_T_ICH8) {
3791 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3792 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3793 return (hash & 0x3ff);
3794 }
3795 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3796 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3797
3798 return (hash & 0xfff);
3799 }
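
/*
 * Worked example (illustrative only): with filter type 0, the hash of
 * multicast address 01:00:5e:00:00:01 uses enaddr[4] = 0x00 and
 * enaddr[5] = 0x01:
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * wm_set_filter() then splits this into MTA register 0 (hash >> 5)
 * and bit 16 (hash & 0x1f).
 */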
3800
3801 /*
3802 * wm_set_filter:
3803 *
3804 * Set up the receive filter.
3805 */
3806 static void
3807 wm_set_filter(struct wm_softc *sc)
3808 {
3809 struct ethercom *ec = &sc->sc_ethercom;
3810 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3811 struct ether_multi *enm;
3812 struct ether_multistep step;
3813 bus_addr_t mta_reg;
3814 uint32_t hash, reg, bit;
3815 int i, size;
3816
3817 if (sc->sc_type >= WM_T_82544)
3818 mta_reg = WMREG_CORDOVA_MTA;
3819 else
3820 mta_reg = WMREG_MTA;
3821
3822 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3823
3824 if (ifp->if_flags & IFF_BROADCAST)
3825 sc->sc_rctl |= RCTL_BAM;
3826 if (ifp->if_flags & IFF_PROMISC) {
3827 sc->sc_rctl |= RCTL_UPE;
3828 goto allmulti;
3829 }
3830
3831 /*
3832 * Set the station address in the first RAL slot, and
3833 * clear the remaining slots.
3834 */
3835 if (sc->sc_type == WM_T_ICH8)
3836 size = WM_ICH8_RAL_TABSIZE;
3837 else
3838 size = WM_RAL_TABSIZE;
3839 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3840 for (i = 1; i < size; i++)
3841 wm_set_ral(sc, NULL, i);
3842
3843 if (sc->sc_type == WM_T_ICH8)
3844 size = WM_ICH8_MC_TABSIZE;
3845 else
3846 size = WM_MC_TABSIZE;
3847 /* Clear out the multicast table. */
3848 for (i = 0; i < size; i++)
3849 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3850
3851 ETHER_FIRST_MULTI(step, ec, enm);
3852 while (enm != NULL) {
3853 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3854 /*
3855 * We must listen to a range of multicast addresses.
3856 * For now, just accept all multicasts, rather than
3857 * trying to set only those filter bits needed to match
3858 * the range. (At this time, the only use of address
3859 * ranges is for IP multicast routing, for which the
3860 * range is big enough to require all bits set.)
3861 */
3862 goto allmulti;
3863 }
3864
3865 hash = wm_mchash(sc, enm->enm_addrlo);
3866
3867 reg = (hash >> 5);
3868 if (sc->sc_type == WM_T_ICH8)
3869 reg &= 0x1f;
3870 else
3871 reg &= 0x7f;
3872 bit = hash & 0x1f;
3873
3874 hash = CSR_READ(sc, mta_reg + (reg << 2));
3875 hash |= 1U << bit;
3876
3877 /* XXX Hardware bug?? */
3878 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3879 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3880 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3881 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3882 } else
3883 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3884
3885 ETHER_NEXT_MULTI(step, enm);
3886 }
3887
3888 ifp->if_flags &= ~IFF_ALLMULTI;
3889 goto setit;
3890
3891 allmulti:
3892 ifp->if_flags |= IFF_ALLMULTI;
3893 sc->sc_rctl |= RCTL_MPE;
3894
3895 setit:
3896 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3897 }
3898
3899 /*
3900 * wm_tbi_mediainit:
3901 *
3902 * Initialize media for use on 1000BASE-X devices.
3903 */
3904 static void
3905 wm_tbi_mediainit(struct wm_softc *sc)
3906 {
3907 const char *sep = "";
3908
3909 if (sc->sc_type < WM_T_82543)
3910 sc->sc_tipg = TIPG_WM_DFLT;
3911 else
3912 sc->sc_tipg = TIPG_LG_DFLT;
3913
3914 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3915 wm_tbi_mediastatus);
3916
3917 /*
3918 * SWD Pins:
3919 *
3920 * 0 = Link LED (output)
3921 * 1 = Loss Of Signal (input)
3922 */
3923 sc->sc_ctrl |= CTRL_SWDPIO(0);
3924 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3925
3926 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3927
3928 #define ADD(ss, mm, dd) \
3929 do { \
3930 aprint_normal("%s%s", sep, ss); \
3931 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3932 sep = ", "; \
3933 } while (/*CONSTCOND*/0)
3934
3935 aprint_normal("%s: ", sc->sc_dev.dv_xname);
3936 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3937 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3938 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3939 aprint_normal("\n");
3940
3941 #undef ADD
3942
3943 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3944 }
3945
3946 /*
3947 * wm_tbi_mediastatus: [ifmedia interface function]
3948 *
3949 * Get the current interface media status on a 1000BASE-X device.
3950 */
3951 static void
3952 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3953 {
3954 struct wm_softc *sc = ifp->if_softc;
3955 uint32_t ctrl;
3956
3957 ifmr->ifm_status = IFM_AVALID;
3958 ifmr->ifm_active = IFM_ETHER;
3959
3960 if (sc->sc_tbi_linkup == 0) {
3961 ifmr->ifm_active |= IFM_NONE;
3962 return;
3963 }
3964
3965 ifmr->ifm_status |= IFM_ACTIVE;
3966 ifmr->ifm_active |= IFM_1000_SX;
3967 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3968 ifmr->ifm_active |= IFM_FDX;
3969 ctrl = CSR_READ(sc, WMREG_CTRL);
3970 if (ctrl & CTRL_RFCE)
3971 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3972 if (ctrl & CTRL_TFCE)
3973 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3974 }
3975
3976 /*
3977 * wm_tbi_mediachange: [ifmedia interface function]
3978 *
3979 * Set hardware to newly-selected media on a 1000BASE-X device.
3980 */
3981 static int
3982 wm_tbi_mediachange(struct ifnet *ifp)
3983 {
3984 struct wm_softc *sc = ifp->if_softc;
3985 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3986 uint32_t status;
3987 int i;
3988
3989 sc->sc_txcw = ife->ifm_data;
3990 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
3991 sc->sc_dev.dv_xname,sc->sc_txcw));
3992 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3993 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3994 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3995 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
3996 sc->sc_txcw |= TXCW_ANE;
3997 } else {
		/*
		 * If autonegotiation is turned off, force link up
		 * and turn on full duplex.
		 */
3999 sc->sc_txcw &= ~TXCW_ANE;
4000 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4001 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4002 delay(1000);
4003 }
4004
4005 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4006 sc->sc_dev.dv_xname,sc->sc_txcw));
4007 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4008 delay(10000);
4009
4010 /* NOTE: CTRL will update TFCE and RFCE automatically. */
4011
4012 sc->sc_tbi_anstate = 0;
4013
4014 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4015 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));
4016
	/*
	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set
	 * if the optics detect a signal, and clear if they don't.
	 */
4021 if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4022 /* Have signal; wait for the link to come up. */
4023
4024 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4025 /*
4026 * Reset the link, and let autonegotiation do its thing
4027 */
4028 sc->sc_ctrl |= CTRL_LRST;
4029 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4030 delay(1000);
4031 sc->sc_ctrl &= ~CTRL_LRST;
4032 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4033 delay(1000);
4034 }
4035
4036 for (i = 0; i < 50; i++) {
4037 delay(10000);
4038 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4039 break;
4040 }
4041
4042 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4043 sc->sc_dev.dv_xname,i));
4044
4045 status = CSR_READ(sc, WMREG_STATUS);
4046 DPRINTF(WM_DEBUG_LINK,
4047 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4048 sc->sc_dev.dv_xname,status, STATUS_LU));
4049 if (status & STATUS_LU) {
4050 /* Link is up. */
4051 DPRINTF(WM_DEBUG_LINK,
4052 ("%s: LINK: set media -> link up %s\n",
4053 sc->sc_dev.dv_xname,
4054 (status & STATUS_FD) ? "FDX" : "HDX"));
4055 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4056 sc->sc_fcrtl &= ~FCRTL_XONE;
4057 if (status & STATUS_FD)
4058 sc->sc_tctl |=
4059 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4060 else
4061 sc->sc_tctl |=
4062 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4063 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4064 sc->sc_fcrtl |= FCRTL_XONE;
4065 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4066 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4067 WMREG_OLD_FCRTL : WMREG_FCRTL,
4068 sc->sc_fcrtl);
4069 sc->sc_tbi_linkup = 1;
4070 } else {
4071 /* Link is down. */
4072 DPRINTF(WM_DEBUG_LINK,
4073 ("%s: LINK: set media -> link down\n",
4074 sc->sc_dev.dv_xname));
4075 sc->sc_tbi_linkup = 0;
4076 }
4077 } else {
4078 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4079 sc->sc_dev.dv_xname));
4080 sc->sc_tbi_linkup = 0;
4081 }
4082
4083 wm_tbi_set_linkled(sc);
4084
4085 return (0);
4086 }
4087
4088 /*
4089 * wm_tbi_set_linkled:
4090 *
4091 * Update the link LED on 1000BASE-X devices.
4092 */
4093 static void
4094 wm_tbi_set_linkled(struct wm_softc *sc)
4095 {
4096
4097 if (sc->sc_tbi_linkup)
4098 sc->sc_ctrl |= CTRL_SWDPIN(0);
4099 else
4100 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4101
4102 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4103 }
4104
4105 /*
4106 * wm_tbi_check_link:
4107 *
4108 * Check the link on 1000BASE-X devices.
4109 */
4110 static void
4111 wm_tbi_check_link(struct wm_softc *sc)
4112 {
4113 uint32_t rxcw, ctrl, status;
4114
4115 if (sc->sc_tbi_anstate == 0)
4116 return;
4117 else if (sc->sc_tbi_anstate > 1) {
4118 DPRINTF(WM_DEBUG_LINK,
4119 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
4120 sc->sc_tbi_anstate));
4121 sc->sc_tbi_anstate--;
4122 return;
4123 }
4124
4125 sc->sc_tbi_anstate = 0;
4126
4127 rxcw = CSR_READ(sc, WMREG_RXCW);
4128 ctrl = CSR_READ(sc, WMREG_CTRL);
4129 status = CSR_READ(sc, WMREG_STATUS);
4130
4131 if ((status & STATUS_LU) == 0) {
4132 DPRINTF(WM_DEBUG_LINK,
4133 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
4134 sc->sc_tbi_linkup = 0;
4135 } else {
4136 DPRINTF(WM_DEBUG_LINK,
4137 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
4138 (status & STATUS_FD) ? "FDX" : "HDX"));
4139 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4140 sc->sc_fcrtl &= ~FCRTL_XONE;
4141 if (status & STATUS_FD)
4142 sc->sc_tctl |=
4143 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4144 else
4145 sc->sc_tctl |=
4146 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4147 if (ctrl & CTRL_TFCE)
4148 sc->sc_fcrtl |= FCRTL_XONE;
4149 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4150 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4151 WMREG_OLD_FCRTL : WMREG_FCRTL,
4152 sc->sc_fcrtl);
4153 sc->sc_tbi_linkup = 1;
4154 }
4155
4156 wm_tbi_set_linkled(sc);
4157 }
4158
4159 /*
4160 * wm_gmii_reset:
4161 *
4162 * Reset the PHY.
4163 */
4164 static void
4165 wm_gmii_reset(struct wm_softc *sc)
4166 {
4167 uint32_t reg;
4168 int func = 0; /* XXX gcc */
4169
4170 if (sc->sc_type == WM_T_ICH8) {
4171 if (wm_get_swfwhw_semaphore(sc))
4172 return;
4173 }
4174 if (sc->sc_type == WM_T_80003) {
4175 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4176 if (wm_get_swfw_semaphore(sc,
4177 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4178 return;
4179 }
4180 if (sc->sc_type >= WM_T_82544) {
4181 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4182 delay(20000);
4183
4184 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4185 delay(20000);
4186 } else {
4187 /*
4188 * With 82543, we need to force speed and duplex on the MAC
4189 * equal to what the PHY speed and duplex configuration is.
4190 * In addition, we need to perform a hardware reset on the PHY
4191 * to take it out of reset.
4192 */
4193 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4194 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4195
4196 /* The PHY reset pin is active-low. */
4197 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4198 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4199 CTRL_EXT_SWDPIN(4));
4200 reg |= CTRL_EXT_SWDPIO(4);
4201
4202 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4203 delay(10);
4204
4205 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4206 delay(10000);
4207
4208 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4209 delay(10);
4210 #if 0
4211 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4212 #endif
4213 }
4214 if (sc->sc_type == WM_T_ICH8)
4215 wm_put_swfwhw_semaphore(sc);
4216 if (sc->sc_type == WM_T_80003)
4217 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4218 }
4219
4220 /*
4221 * wm_gmii_mediainit:
4222 *
4223 * Initialize media for use on 1000BASE-T devices.
4224 */
4225 static void
4226 wm_gmii_mediainit(struct wm_softc *sc)
4227 {
4228 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4229
4230 /* We have MII. */
4231 sc->sc_flags |= WM_F_HAS_MII;
4232
4233 if (sc->sc_type >= WM_T_80003)
4234 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4235 else
4236 sc->sc_tipg = TIPG_1000T_DFLT;
4237
4238 /*
4239 * Let the chip set speed/duplex on its own based on
4240 * signals from the PHY.
4241 * XXXbouyer - I'm not sure this is right for the 80003,
4242 * the em driver only sets CTRL_SLU here - but it seems to work.
4243 */
4244 sc->sc_ctrl |= CTRL_SLU;
4245 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4246
4247 /* Initialize our media structures and probe the GMII. */
4248 sc->sc_mii.mii_ifp = ifp;
4249
4250 if (sc->sc_type >= WM_T_80003) {
4251 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4252 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4253 } else if (sc->sc_type >= WM_T_82544) {
4254 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4255 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4256 } else {
4257 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4258 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4259 }
4260 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4261
4262 wm_gmii_reset(sc);
4263
4264 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4265 wm_gmii_mediastatus);
4266
4267 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4268 MII_OFFSET_ANY, MIIF_DOPAUSE);
4269 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4270 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4271 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4272 } else
4273 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4274 }
4275
4276 /*
4277 * wm_gmii_mediastatus: [ifmedia interface function]
4278 *
4279 * Get the current interface media status on a 1000BASE-T device.
4280 */
4281 static void
4282 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4283 {
4284 struct wm_softc *sc = ifp->if_softc;
4285
4286 mii_pollstat(&sc->sc_mii);
4287 ifmr->ifm_status = sc->sc_mii.mii_media_status;
4288 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
4289 sc->sc_flowflags;
4290 }
4291
4292 /*
4293 * wm_gmii_mediachange: [ifmedia interface function]
4294 *
4295 * Set hardware to newly-selected media on a 1000BASE-T device.
4296 */
4297 static int
4298 wm_gmii_mediachange(struct ifnet *ifp)
4299 {
4300 struct wm_softc *sc = ifp->if_softc;
4301 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4302
4303 if (ifp->if_flags & IFF_UP) {
4304 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4305 sc->sc_ctrl |= CTRL_SLU;
4306 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4307 || (sc->sc_type > WM_T_82543)) {
4308 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4309 } else {
4310 sc->sc_ctrl &= ~CTRL_ASDE;
4311 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4312 if (ife->ifm_media & IFM_FDX)
4313 sc->sc_ctrl |= CTRL_FD;
			switch (IFM_SUBTYPE(ife->ifm_media)) {
4315 case IFM_10_T:
4316 sc->sc_ctrl |= CTRL_SPEED_10;
4317 break;
4318 case IFM_100_TX:
4319 sc->sc_ctrl |= CTRL_SPEED_100;
4320 break;
4321 case IFM_1000_T:
4322 sc->sc_ctrl |= CTRL_SPEED_1000;
4323 break;
4324 default:
4325 panic("wm_gmii_mediachange: bad media 0x%x",
4326 ife->ifm_media);
4327 }
4328 }
4329 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4330 if (sc->sc_type <= WM_T_82543)
4331 wm_gmii_reset(sc);
4332 mii_mediachg(&sc->sc_mii);
4333 }
4334 return (0);
4335 }
4336
4337 #define MDI_IO CTRL_SWDPIN(2)
4338 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4339 #define MDI_CLK CTRL_SWDPIN(3)
4340
4341 static void
4342 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4343 {
4344 uint32_t i, v;
4345
4346 v = CSR_READ(sc, WMREG_CTRL);
4347 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4348 v |= MDI_DIR | CTRL_SWDPIO(3);
4349
4350 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4351 if (data & i)
4352 v |= MDI_IO;
4353 else
4354 v &= ~MDI_IO;
4355 CSR_WRITE(sc, WMREG_CTRL, v);
4356 delay(10);
4357 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4358 delay(10);
4359 CSR_WRITE(sc, WMREG_CTRL, v);
4360 delay(10);
4361 }
4362 }
4363
4364 static uint32_t
4365 i82543_mii_recvbits(struct wm_softc *sc)
4366 {
4367 uint32_t v, i, data = 0;
4368
4369 v = CSR_READ(sc, WMREG_CTRL);
4370 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4371 v |= CTRL_SWDPIO(3);
4372
4373 CSR_WRITE(sc, WMREG_CTRL, v);
4374 delay(10);
4375 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4376 delay(10);
4377 CSR_WRITE(sc, WMREG_CTRL, v);
4378 delay(10);
4379
4380 for (i = 0; i < 16; i++) {
4381 data <<= 1;
4382 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4383 delay(10);
4384 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4385 data |= 1;
4386 CSR_WRITE(sc, WMREG_CTRL, v);
4387 delay(10);
4388 }
4389
4390 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4391 delay(10);
4392 CSR_WRITE(sc, WMREG_CTRL, v);
4393 delay(10);
4394
4395 return (data);
4396 }
4397
4398 #undef MDI_IO
4399 #undef MDI_DIR
4400 #undef MDI_CLK
4401
4402 /*
4403 * wm_gmii_i82543_readreg: [mii interface function]
4404 *
4405 * Read a PHY register on the GMII (i82543 version).
4406 */
4407 static int
4408 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
4409 {
4410 struct wm_softc *sc = (void *) self;
4411 int rv;
4412
4413 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4414 i82543_mii_sendbits(sc, reg | (phy << 5) |
4415 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4416 rv = i82543_mii_recvbits(sc) & 0xffff;
4417
4418 DPRINTF(WM_DEBUG_GMII,
4419 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4420 sc->sc_dev.dv_xname, phy, reg, rv));
4421
4422 return (rv);
4423 }
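
/*
 * Illustrative only: the 14 bits sent above are the first half of a
 * standard MII management read frame.  Following the 32-bit preamble
 * of all ones come the start bits (01), the read opcode (10), 5 bits
 * of PHY address, and 5 bits of register address; the PHY then drives
 * the 16 data bits, which i82543_mii_recvbits() clocks back in.
 */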
4424
4425 /*
4426 * wm_gmii_i82543_writereg: [mii interface function]
4427 *
4428 * Write a PHY register on the GMII (i82543 version).
4429 */
4430 static void
4431 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
4432 {
4433 struct wm_softc *sc = (void *) self;
4434
4435 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4436 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4437 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4438 (MII_COMMAND_START << 30), 32);
4439 }
4440
4441 /*
4442 * wm_gmii_i82544_readreg: [mii interface function]
4443 *
4444 * Read a PHY register on the GMII.
4445 */
4446 static int
4447 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
4448 {
4449 struct wm_softc *sc = (void *) self;
4450 uint32_t mdic = 0;
4451 int i, rv;
4452
4453 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4454 MDIC_REGADD(reg));
4455
4456 for (i = 0; i < 320; i++) {
4457 mdic = CSR_READ(sc, WMREG_MDIC);
4458 if (mdic & MDIC_READY)
4459 break;
4460 delay(10);
4461 }
4462
4463 if ((mdic & MDIC_READY) == 0) {
4464 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4465 sc->sc_dev.dv_xname, phy, reg);
4466 rv = 0;
4467 } else if (mdic & MDIC_E) {
4468 #if 0 /* This is normal if no PHY is present. */
4469 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4470 sc->sc_dev.dv_xname, phy, reg);
4471 #endif
4472 rv = 0;
4473 } else {
4474 rv = MDIC_DATA(mdic);
4475 if (rv == 0xffff)
4476 rv = 0;
4477 }
4478
4479 return (rv);
4480 }
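
/*
 * Illustrative only: on i82544 and later the MDIC register replaces
 * the bit-banged frame; a single write encodes the opcode, PHY
 * address, and register address, and the poll above gives the
 * hardware up to 320 * 10us = 3.2ms to assert MDIC_READY along with
 * the 16 data bits.
 */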
4481
4482 /*
4483 * wm_gmii_i82544_writereg: [mii interface function]
4484 *
4485 * Write a PHY register on the GMII.
4486 */
4487 static void
4488 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
4489 {
4490 struct wm_softc *sc = (void *) self;
4491 uint32_t mdic = 0;
4492 int i;
4493
4494 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4495 MDIC_REGADD(reg) | MDIC_DATA(val));
4496
4497 for (i = 0; i < 320; i++) {
4498 mdic = CSR_READ(sc, WMREG_MDIC);
4499 if (mdic & MDIC_READY)
4500 break;
4501 delay(10);
4502 }
4503
4504 if ((mdic & MDIC_READY) == 0)
4505 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4506 sc->sc_dev.dv_xname, phy, reg);
4507 else if (mdic & MDIC_E)
4508 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4509 sc->sc_dev.dv_xname, phy, reg);
4510 }
4511
4512 /*
4513 * wm_gmii_i80003_readreg: [mii interface function]
4514 *
 *	Read a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
4518 */
4519 static int
4520 wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
4521 {
4522 struct wm_softc *sc = (void *) self;
4523 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4524 int rv;
4525
4526 if (phy != 1) /* only one PHY on kumeran bus */
4527 return 0;
4528
4529 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4530 return 0;
4531
4532 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4533 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4534 reg >> GG82563_PAGE_SHIFT);
4535 } else {
4536 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4537 reg >> GG82563_PAGE_SHIFT);
4538 }
4539
4540 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4541 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4542 return (rv);
4543 }
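
/*
 * Illustrative only: GG82563 registers are paged.  The page number is
 * carried in the upper bits of 'reg' (reg >> GG82563_PAGE_SHIFT) and
 * must be latched through a page-select register first; the low bits
 * (reg & GG82563_MAX_REG_ADDRESS) then address within that page, so
 * every access is a write-then-access pair on the underlying MDIC.
 */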
4544
4545 /*
4546 * wm_gmii_i80003_writereg: [mii interface function]
4547 *
 *	Write a PHY register on the Kumeran bus.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
4551 */
4552 static void
4553 wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
4554 {
4555 struct wm_softc *sc = (void *) self;
4556 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4557
4558 if (phy != 1) /* only one PHY on kumeran bus */
4559 return;
4560
4561 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4562 return;
4563
4564 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4565 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4566 reg >> GG82563_PAGE_SHIFT);
4567 } else {
4568 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4569 reg >> GG82563_PAGE_SHIFT);
4570 }
4571
4572 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4573 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4574 }
4575
4576 /*
4577 * wm_gmii_statchg: [mii interface function]
4578 *
4579 * Callback from MII layer when media changes.
4580 */
4581 static void
4582 wm_gmii_statchg(struct device *self)
4583 {
4584 struct wm_softc *sc = (void *) self;
4585 struct mii_data *mii = &sc->sc_mii;
4586
4587 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4588 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4589 sc->sc_fcrtl &= ~FCRTL_XONE;
4590
4591 /*
4592 * Get flow control negotiation result.
4593 */
4594 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4595 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4596 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4597 mii->mii_media_active &= ~IFM_ETH_FMASK;
4598 }
4599
4600 if (sc->sc_flowflags & IFM_FLOW) {
4601 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4602 sc->sc_ctrl |= CTRL_TFCE;
4603 sc->sc_fcrtl |= FCRTL_XONE;
4604 }
4605 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4606 sc->sc_ctrl |= CTRL_RFCE;
4607 }
4608
4609 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4610 DPRINTF(WM_DEBUG_LINK,
4611 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
4612 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4613 } else {
4614 DPRINTF(WM_DEBUG_LINK,
4615 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
4616 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4617 }
4618
4619 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4620 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4621 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4622 : WMREG_FCRTL, sc->sc_fcrtl);
4623 if (sc->sc_type >= WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4625 case IFM_1000_T:
4626 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4627 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4628 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4629 break;
4630 default:
4631 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4632 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4633 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4634 break;
4635 }
4636 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4637 }
4638 }
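
/*
 * Illustrative only: the mapping applied above is IFM_ETH_TXPAUSE ->
 * CTRL_TFCE (we may transmit pause frames, so FCRTL_XONE enables XON
 * frames as well) and IFM_ETH_RXPAUSE -> CTRL_RFCE (we honor pause
 * frames sent by the link partner).
 */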
4639
4640 /*
4641 * wm_kmrn_i80003_readreg:
4642 *
4643 * Read a kumeran register
4644 */
4645 static int
4646 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4647 {
4648 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4649 int rv;
4650
4651 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4652 return 0;
4653
4654 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4655 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4656 KUMCTRLSTA_REN);
4657 delay(2);
4658
4659 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4660 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4661 return (rv);
4662 }
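
/*
 * Illustrative only: a Kumeran read is a single KUMCTRLSTA access;
 * the register offset is shifted into the KUMCTRLSTA_OFFSET field,
 * KUMCTRLSTA_REN requests the read, and after a short delay the low
 * KUMCTRLSTA_MASK bits of the register hold the result.
 */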
4663
4664 /*
4665 * wm_kmrn_i80003_writereg:
4666 *
4667 * Write a kumeran register
4668 */
4669 static void
4670 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4671 {
4672 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4673
4674 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4675 return;
4676
4677 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4678 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4679 (val & KUMCTRLSTA_MASK));
4680 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4681 }
4682
4683 static int
4684 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4685 {
4686 uint32_t eecd = 0;
4687
4688 if (sc->sc_type == WM_T_82573) {
4689 eecd = CSR_READ(sc, WMREG_EECD);
4690
4691 /* Isolate bits 15 & 16 */
4692 eecd = ((eecd >> 15) & 0x03);
4693
4694 /* If both bits are set, device is Flash type */
4695 if (eecd == 0x03) {
4696 return 0;
4697 }
4698 }
4699 return 1;
4700 }
4701
4702 static int
4703 wm_get_swsm_semaphore(struct wm_softc *sc)
4704 {
4705 int32_t timeout;
4706 uint32_t swsm;
4707
4708 /* Get the FW semaphore. */
4709 timeout = 1000 + 1; /* XXX */
4710 while (timeout) {
4711 swsm = CSR_READ(sc, WMREG_SWSM);
4712 swsm |= SWSM_SWESMBI;
4713 CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit, we got the semaphore. */
4715 swsm = CSR_READ(sc, WMREG_SWSM);
4716 if (swsm & SWSM_SWESMBI)
4717 break;
4718
4719 delay(50);
4720 timeout--;
4721 }
4722
4723 if (timeout == 0) {
4724 aprint_error("%s: could not acquire EEPROM GNT\n",
4725 sc->sc_dev.dv_xname);
4726 /* Release semaphores */
4727 wm_put_swsm_semaphore(sc);
4728 return 1;
4729 }
4730 return 0;
4731 }
4732
4733 static void
4734 wm_put_swsm_semaphore(struct wm_softc *sc)
4735 {
4736 uint32_t swsm;
4737
4738 swsm = CSR_READ(sc, WMREG_SWSM);
4739 swsm &= ~(SWSM_SWESMBI);
4740 CSR_WRITE(sc, WMREG_SWSM, swsm);
4741 }
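
/*
 * Illustrative only: the SWSM semaphore is a write-then-read-back
 * handshake.  Software sets SWSM_SWESMBI and reads the register back;
 * if the bit stuck, software owns the semaphore, otherwise firmware
 * holds it and we retry (up to roughly 1000 * 50us = 50ms above).
 */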
4742
4743 static int
4744 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4745 {
4746 uint32_t swfw_sync;
4747 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4748 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
4752 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4753 if (wm_get_swsm_semaphore(sc))
4754 return 1;
4755 }
4756 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4757 if ((swfw_sync & (swmask | fwmask)) == 0) {
4758 swfw_sync |= swmask;
4759 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4760 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4761 wm_put_swsm_semaphore(sc);
4762 return 0;
4763 }
4764 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4765 wm_put_swsm_semaphore(sc);
4766 delay(5000);
4767 }
4768 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4769 sc->sc_dev.dv_xname, mask, swfw_sync);
4770 return 1;
4771 }
4772
4773 static void
4774 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4775 {
4776 uint32_t swfw_sync;
4777
4778 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4779 while (wm_get_swsm_semaphore(sc) != 0)
4780 continue;
4781 }
4782 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4783 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4784 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4785 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4786 wm_put_swsm_semaphore(sc);
4787 }
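
/*
 * Illustrative only: SW_FW_SYNC carries a pair of claim bits per
 * resource, a software bit (mask << SWFW_SOFT_SHIFT) and a firmware
 * bit (mask << SWFW_FIRM_SHIFT).  Software may set its bit only while
 * both are clear, and the register itself is guarded by the SWSM
 * semaphore on parts that have one.
 */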
4788
4789 static int
4790 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4791 {
4792 uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
4796 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4797 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4798 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4799
4800 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4801 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4802 return 0;
4803 delay(5000);
4804 }
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4806 sc->sc_dev.dv_xname, ext_ctrl);
4807 return 1;
4808 }
4809
4810 static void
4811 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4812 {
4813 uint32_t ext_ctrl;
4814 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4815 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4816 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4817 }
4818
4819 /******************************************************************************
 * Reads one or more 16-bit words from the EEPROM using the ICH8's flash
 * access register.
4822 *
4823 * sc - Struct containing variables accessed by shared code
4824 * offset - offset of word in the EEPROM to read
4825 * data - word read from the EEPROM
4826 * words - number of words to read
4827 *****************************************************************************/
4828 static int
4829 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4830 {
4831 int32_t error = 0;
4832 uint32_t flash_bank = 0;
4833 uint32_t act_offset = 0;
4834 uint32_t bank_offset = 0;
4835 uint16_t word = 0;
4836 uint16_t i = 0;
4837
	/*
	 * We need to know which flash bank is currently valid.  In the
	 * event that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank, so it cannot be trusted and needs to be
	 * updated with each read.  The value of bit 22 (EECD_SEC1VAL)
	 * indicates the flash bank we're on.
	 */
4844 flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4845
	/* Compute the byte offset of the selected bank; bank size is in words. */
4847 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4848
4849 error = wm_get_swfwhw_semaphore(sc);
4850 if (error)
4851 return error;
4852
4853 for (i = 0; i < words; i++) {
4854 /* The NVM part needs a byte offset, hence * 2 */
4855 act_offset = bank_offset + ((offset + i) * 2);
4856 error = wm_read_ich8_word(sc, act_offset, &word);
4857 if (error)
4858 break;
4859 data[i] = word;
4860 }
4861
4862 wm_put_swfwhw_semaphore(sc);
4863 return error;
4864 }
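
/*
 * Worked example (illustrative only): if EECD_SEC1VAL selects bank 1,
 * reading word 0x10 becomes a byte access at
 * sc_ich8_flash_bank_size * 2 + 0x20, since bank sizes are kept in
 * words while the flash interface is byte-addressed.
 */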
4865
4866 /******************************************************************************
4867 * This function does initial flash setup so that a new read/write/erase cycle
4868 * can be started.
4869 *
4870 * sc - The pointer to the hw structure
4871 ****************************************************************************/
4872 static int32_t
4873 wm_ich8_cycle_init(struct wm_softc *sc)
4874 {
4875 uint16_t hsfsts;
4876 int32_t error = 1;
4877 int32_t i = 0;
4878
4879 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4880
	/* Maybe check the Flash Descriptor Valid bit in Hw status */
4882 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4883 return error;
4884 }
4885
	/* Clear FCERR and DAEL in Hw status by writing 1s. */
	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4889
4890 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4891
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after a
	 * hardware reset, which could then be used as an indication of
	 * whether a cycle is in progress or has been completed.  We
	 * should also have some software semaphore mechanism to guard
	 * FDONE or the cycle-in-progress bit so that access by two
	 * threads can be serialized, or a way to keep two threads from
	 * starting a cycle at the same time.
	 */
4900
4901 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
		/*
		 * There is no cycle running at present, so we can start
		 * one.  Begin by setting Flash Cycle Done.
		 */
4904 hsfsts |= HSFSTS_DONE;
4905 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4906 error = 0;
4907 } else {
		/*
		 * Otherwise poll for some time so the current cycle has
		 * a chance to end before giving up.
		 */
4910 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4911 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4912 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4913 error = 0;
4914 break;
4915 }
4916 delay(1);
4917 }
4918 if (error == 0) {
			/*
			 * The previous cycle completed in time; now set
			 * Flash Cycle Done.
			 */
4921 hsfsts |= HSFSTS_DONE;
4922 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4923 }
4924 }
4925 return error;
4926 }
4927
4928 /******************************************************************************
4929 * This function starts a flash cycle and waits for its completion
4930 *
4931 * sc - The pointer to the hw structure
4932 ****************************************************************************/
4933 static int32_t
4934 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
4935 {
4936 uint16_t hsflctl;
4937 uint16_t hsfsts;
4938 int32_t error = 1;
4939 uint32_t i = 0;
4940
4941 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
4942 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
4943 hsflctl |= HSFCTL_GO;
4944 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
4945
4946 /* wait till FDONE bit is set to 1 */
4947 do {
4948 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4949 if (hsfsts & HSFSTS_DONE)
4950 break;
4951 delay(1);
4952 i++;
4953 } while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
4955 error = 0;
4956 }
4957 return error;
4958 }
4959
4960 /******************************************************************************
4961 * Reads a byte or word from the NVM using the ICH8 flash access registers.
4962 *
4963 * sc - The pointer to the hw structure
4964 * index - The index of the byte or word to read.
4965 * size - Size of data to read, 1=byte 2=word
4966 * data - Pointer to the word to store the value read.
4967 *****************************************************************************/
4968 static int32_t
4969 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
4970 uint32_t size, uint16_t* data)
4971 {
4972 uint16_t hsfsts;
4973 uint16_t hsflctl;
4974 uint32_t flash_linear_address;
4975 uint32_t flash_data = 0;
4976 int32_t error = 1;
4977 int32_t count = 0;
4978
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
4981 return error;
4982
4983 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
4984 sc->sc_ich8_flash_base;
4985
4986 do {
4987 delay(1);
4988 /* Steps */
4989 error = wm_ich8_cycle_init(sc);
4990 if (error)
4991 break;
4992
4993 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to a 1- or 2-byte transfer, respectively. */
4995 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
4996 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
4997 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
4998
		/*
		 * Write the last 24 bits of the index into the Flash
		 * Linear Address field of the Flash Address register.
		 * TODO: maybe check the index against the size of flash.
		 */
5002
5003 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5004
5005 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5006
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in (shift in) the
		 * Flash Data0, least-significant byte first.
		 */
5010 if (error == 0) {
5011 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5012 if (size == 1) {
5013 *data = (uint8_t)(flash_data & 0x000000FF);
5014 } else if (size == 2) {
5015 *data = (uint16_t)(flash_data & 0x0000FFFF);
5016 }
5017 break;
5018 } else {
5019 /* If we've gotten here, then things are probably completely hosed,
5020 * but if the error condition is detected, it won't hurt to give
5021 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5022 */
5023 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5024 if (hsfsts & HSFSTS_ERR) {
5025 /* Repeat for some time before giving up. */
5026 continue;
5027 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5028 break;
5029 }
5030 }
5031 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5032
5033 return error;
5034 }
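
/*
 * Illustrative only: a single ICH8 flash read is thus (1) make sure
 * no cycle is in flight (wm_ich8_cycle_init), (2) program the byte
 * count and the READ cycle type in HSFCTL, (3) write the linear
 * address to FADDR, (4) set HSFCTL_GO and poll for FDONE
 * (wm_ich8_flash_cycle), and (5) pull the result out of FDATA0.
 */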
5035
5036 #if 0
5037 /******************************************************************************
5038 * Reads a single byte from the NVM using the ICH8 flash access registers.
5039 *
5040 * sc - pointer to wm_hw structure
5041 * index - The index of the byte to read.
5042 * data - Pointer to a byte to store the value read.
5043 *****************************************************************************/
5044 static int32_t
5045 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5046 {
5047 int32_t status = 0;
5048 uint16_t word = 0;
5049
5050 status = wm_read_ich8_data(sc, index, 1, &word);
5051 if (status == 0) {
5052 *data = (uint8_t)word;
5053 }
5054
5055 return status;
5056 }
5057 #endif
5058
5059 /******************************************************************************
5060 * Reads a word from the NVM using the ICH8 flash access registers.
5061 *
5062 * sc - pointer to wm_hw structure
5063 * index - The starting byte index of the word to read.
5064 * data - Pointer to a word to store the value read.
5065 *****************************************************************************/
5066 static int32_t
5067 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5068 {
5069 int32_t status = 0;
5070 status = wm_read_ich8_data(sc, index, 2, data);
5071 return status;
5072 }
5073