/*	$NetBSD: if_wm.c,v 1.153 2008/01/29 20:24:41 tls Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.153 2008/01/29 20:24:41 tls Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
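
/*
 * Usage note: the second DPRINTF() argument is the entire printf()
 * argument list wrapped in its own set of parentheses, which lets the
 * macro expand to nothing when WM_DEBUG is not defined.  For example
 * (the message text here is illustrative only):
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", sc->sc_dev.dv_xname));
 */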

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
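
/*
 * All of the ring sizes above are powers of two, so the NEXT/PREV
 * macros can wrap indices with a cheap bitwise AND instead of a
 * modulus.  For example, with WM_NRXDESC == 256 the mask is 0xff, so
 * WM_NEXTRX(255) == (256 & 0xff) == 0 and WM_PREVRX(0) == (-1 & 0xff)
 * == 255.  The same invariant is why sc_txnum and sc_ntxdesc below
 * must be powers of two.
 */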

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown	= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
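
/*
 * The Rx chain is built with a "pointer to the tail pointer":
 * sc_rxtailp always points at the m_next field (or at sc_rxhead) that
 * the next fragment should be stored through, so appending a buffer is
 * O(1) with no special case for an empty chain.  After
 * WM_RXCHAIN_RESET(sc) followed by WM_RXCHAIN_LINK(sc, m1) and
 * WM_RXCHAIN_LINK(sc, m2), sc_rxhead is m1, m1->m_next is m2, and
 * sc_rxtailp is &m2->m_next.
 */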

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation
					   (ICH8/ICH9) */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
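
/*
 * The chip takes a 64-bit descriptor base address as separate low and
 * high 32-bit halves, hence the _LO/_HI pairs above.  The
 * sizeof(bus_addr_t) test is a compile-time constant, so on 32-bit
 * platforms the 64-bit shift (and the whole _HI half) folds away to 0.
 */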

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
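
/*
 * Example: on a 4096-entry ring, WM_CDTXSYNC(sc, 4090, 10, ops) syncs
 * descriptors 4090-4095 in the first bus_dmamap_sync() call and
 * descriptors 0-3 in the second; a range that does not cross the end
 * of the ring is handled entirely by the second call.
 */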

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't "scoot" packets longer than the	\
	 * standard Ethernet MTU.  On strict-alignment platforms, if	\
	 * the total size exceeds (2K - 2) we set align_tweak to 0	\
	 * and let the upper layer copy the headers.			\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
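
/*
 * Note that descriptor address fields are little-endian regardless of
 * host byte order, so wm_set_dma_addr() applies htole32() to both
 * halves; on a big-endian host this is where the byte swap happens,
 * while on a little-endian host htole32() is a no-op.
 */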

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
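				/*
				 * MMRBC is encoded as (512 << n) bytes,
				 * e.g. n == 2 means 2048 bytes.  If the
				 * command register requests more than the
				 * device reports it can handle (maxb),
				 * clamp it down to maxb.
				 */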
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
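
	/*
	 * Note on the unpacking above: the EEPROM stores the MAC address
	 * as three little-endian 16-bit words, so the low byte of each
	 * word is the earlier octet.  E.g. (illustrative values only)
	 * myea[0] == 0x1100 yields enaddr[0] == 0x00 and
	 * enaddr[1] == 0x11.
	 */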
1360
1361 /*
1362 * Toggle the LSB of the MAC address on the second port
1363 * of the dual port controller.
1364 */
1365 if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
1366 || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
1367 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
1368 enaddr[5] ^= 1;
1369 }
1370
1371 aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
1372 ether_sprintf(enaddr));
1373
1374 /*
1375 * Read the config info from the EEPROM, and set up various
1376 * bits in the control registers based on their contents.
1377 */
1378 pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1379 "i82543-cfg1");
1380 if (pn != NULL) {
1381 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1382 cfg1 = (uint16_t) prop_number_integer_value(pn);
1383 } else {
1384 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1385 aprint_error("%s: unable to read CFG1\n",
1386 sc->sc_dev.dv_xname);
1387 return;
1388 }
1389 }
1390
1391 pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1392 "i82543-cfg2");
1393 if (pn != NULL) {
1394 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1395 cfg2 = (uint16_t) prop_number_integer_value(pn);
1396 } else {
1397 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1398 aprint_error("%s: unable to read CFG2\n",
1399 sc->sc_dev.dv_xname);
1400 return;
1401 }
1402 }
1403
1404 if (sc->sc_type >= WM_T_82544) {
1405 pn = prop_dictionary_get(device_properties(&sc->sc_dev),
1406 "i82543-swdpin");
1407 if (pn != NULL) {
1408 KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1409 swdpin = (uint16_t) prop_number_integer_value(pn);
1410 } else {
1411 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1412 aprint_error("%s: unable to read SWDPIN\n",
1413 sc->sc_dev.dv_xname);
1414 return;
1415 }
1416 }
1417 }
1418
1419 if (cfg1 & EEPROM_CFG1_ILOS)
1420 sc->sc_ctrl |= CTRL_ILOS;
1421 if (sc->sc_type >= WM_T_82544) {
1422 sc->sc_ctrl |=
1423 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1424 CTRL_SWDPIO_SHIFT;
1425 sc->sc_ctrl |=
1426 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1427 CTRL_SWDPINS_SHIFT;
1428 } else {
1429 sc->sc_ctrl |=
1430 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1431 CTRL_SWDPIO_SHIFT;
1432 }
1433
1434 #if 0
1435 if (sc->sc_type >= WM_T_82544) {
1436 if (cfg1 & EEPROM_CFG1_IPS0)
1437 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1438 if (cfg1 & EEPROM_CFG1_IPS1)
1439 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1440 sc->sc_ctrl_ext |=
1441 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1442 CTRL_EXT_SWDPIO_SHIFT;
1443 sc->sc_ctrl_ext |=
1444 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1445 CTRL_EXT_SWDPINS_SHIFT;
1446 } else {
1447 sc->sc_ctrl_ext |=
1448 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1449 CTRL_EXT_SWDPIO_SHIFT;
1450 }
1451 #endif
1452
1453 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1454 #if 0
1455 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1456 #endif
1457
1458 /*
1459 * Set up some register offsets that are different between
1460 * the i82542 and the i82543 and later chips.
1461 */
1462 if (sc->sc_type < WM_T_82543) {
1463 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1464 sc->sc_tdt_reg = WMREG_OLD_TDT;
1465 } else {
1466 sc->sc_rdt_reg = WMREG_RDT;
1467 sc->sc_tdt_reg = WMREG_TDT;
1468 }
1469
1470 /*
1471 * Determine if we're TBI or GMII mode, and initialize the
1472 * media structures accordingly.
1473 */
1474 if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1475 || sc->sc_type == WM_T_82573) {
1476 /* STATUS_TBIMODE reserved/reused, can't rely on it */
1477 wm_gmii_mediainit(sc);
1478 } else if (sc->sc_type < WM_T_82543 ||
1479 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1480 if (wmp->wmp_flags & WMP_F_1000T)
1481 aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
1482 "product!\n", sc->sc_dev.dv_xname);
1483 wm_tbi_mediainit(sc);
1484 } else {
1485 if (wmp->wmp_flags & WMP_F_1000X)
1486 aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
1487 "product!\n", sc->sc_dev.dv_xname);
1488 wm_gmii_mediainit(sc);
1489 }
1490
1491 ifp = &sc->sc_ethercom.ec_if;
1492 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
1493 ifp->if_softc = sc;
1494 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1495 ifp->if_ioctl = wm_ioctl;
1496 ifp->if_start = wm_start;
1497 ifp->if_watchdog = wm_watchdog;
1498 ifp->if_init = wm_init;
1499 ifp->if_stop = wm_stop;
1500 IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1501 IFQ_SET_READY(&ifp->if_snd);
1502
1503 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
1504 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1505
1506 /*
1507 * If we're a i82543 or greater, we can support VLANs.
1508 */
1509 if (sc->sc_type >= WM_T_82543)
1510 sc->sc_ethercom.ec_capabilities |=
1511 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
1512
1513 /*
1514 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
1515 * on i82543 and later.
1516 */
1517 if (sc->sc_type >= WM_T_82543) {
1518 ifp->if_capabilities |=
1519 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1520 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1521 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1522 IFCAP_CSUM_TCPv6_Tx |
1523 IFCAP_CSUM_UDPv6_Tx;
1524 }
1525
1526 /*
1527 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1528 *
1529 * 82541GI (8086:1076) ... no
1530 * 82572EI (8086:10b9) ... yes
1531 */
1532 if (sc->sc_type >= WM_T_82571) {
1533 ifp->if_capabilities |=
1534 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1535 }
1536
1537 /*
1538 * If we're a i82544 or greater (except i82547), we can do
1539 * TCP segmentation offload.
1540 */
1541 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1542 ifp->if_capabilities |= IFCAP_TSOv4;
1543 }
1544
1545 if (sc->sc_type >= WM_T_82571) {
1546 ifp->if_capabilities |= IFCAP_TSOv6;
1547 }
1548
1549 /*
1550 * Attach the interface.
1551 */
1552 if_attach(ifp);
1553 ether_ifattach(ifp, enaddr);
1554 #if NRND > 0
1555 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1556 RND_TYPE_NET, 0);
1557 #endif
1558
1559 #ifdef WM_EVENT_COUNTERS
1560 /* Attach event counters. */
1561 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1562 NULL, sc->sc_dev.dv_xname, "txsstall");
1563 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1564 NULL, sc->sc_dev.dv_xname, "txdstall");
1565 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1566 NULL, sc->sc_dev.dv_xname, "txfifo_stall");
1567 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1568 NULL, sc->sc_dev.dv_xname, "txdw");
1569 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1570 NULL, sc->sc_dev.dv_xname, "txqe");
1571 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1572 NULL, sc->sc_dev.dv_xname, "rxintr");
1573 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1574 NULL, sc->sc_dev.dv_xname, "linkintr");
1575
1576 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1577 NULL, sc->sc_dev.dv_xname, "rxipsum");
1578 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1579 NULL, sc->sc_dev.dv_xname, "rxtusum");
1580 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1581 NULL, sc->sc_dev.dv_xname, "txipsum");
1582 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1583 NULL, sc->sc_dev.dv_xname, "txtusum");
1584 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1585 NULL, sc->sc_dev.dv_xname, "txtusum6");
1586
1587 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1588 NULL, sc->sc_dev.dv_xname, "txtso");
1589 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1590 NULL, sc->sc_dev.dv_xname, "txtso6");
1591 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1592 NULL, sc->sc_dev.dv_xname, "txtsopain");
1593
1594 for (i = 0; i < WM_NTXSEGS; i++) {
1595 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1596 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1597 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1598 }
1599
1600 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1601 NULL, sc->sc_dev.dv_xname, "txdrop");
1602
1603 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1604 NULL, sc->sc_dev.dv_xname, "tu");
1605
1606 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1607 NULL, sc->sc_dev.dv_xname, "tx_xoff");
1608 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1609 NULL, sc->sc_dev.dv_xname, "tx_xon");
1610 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1611 NULL, sc->sc_dev.dv_xname, "rx_xoff");
1612 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1613 NULL, sc->sc_dev.dv_xname, "rx_xon");
1614 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1615 NULL, sc->sc_dev.dv_xname, "rx_macctl");
1616 #endif /* WM_EVENT_COUNTERS */
1617
1618 if (!pmf_device_register(self, NULL, NULL))
1619 aprint_error_dev(self, "couldn't establish power handler\n");
1620 else
1621 pmf_class_network_register(self, ifp);
1622
1623 return;
1624
1625 /*
1626 * Free any resources we've allocated during the failed attach
1627 * attempt. Do this in reverse order and fall through.
1628 */
1629 fail_5:
1630 for (i = 0; i < WM_NRXDESC; i++) {
1631 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1632 bus_dmamap_destroy(sc->sc_dmat,
1633 sc->sc_rxsoft[i].rxs_dmamap);
1634 }
1635 fail_4:
1636 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1637 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1638 bus_dmamap_destroy(sc->sc_dmat,
1639 sc->sc_txsoft[i].txs_dmamap);
1640 }
1641 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1642 fail_3:
1643 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1644 fail_2:
1645 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1646 cdata_size);
1647 fail_1:
1648 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1649 fail_0:
1650 return;
1651 }
1652
1653 /*
1654 * wm_tx_offload:
1655 *
1656 * Set up TCP/IP checksumming parameters for the
1657 * specified packet.
1658 */
1659 static int
1660 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1661 uint8_t *fieldsp)
1662 {
1663 struct mbuf *m0 = txs->txs_mbuf;
1664 struct livengood_tcpip_ctxdesc *t;
1665 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1666 uint32_t ipcse;
1667 struct ether_header *eh;
1668 int offset, iphl;
1669 uint8_t fields;
1670
1671 /*
1672 * XXX It would be nice if the mbuf pkthdr had offset
1673 * fields for the protocol headers.
1674 */
1675
1676 eh = mtod(m0, struct ether_header *);
1677 switch (htons(eh->ether_type)) {
1678 case ETHERTYPE_IP:
1679 case ETHERTYPE_IPV6:
1680 offset = ETHER_HDR_LEN;
1681 break;
1682
1683 case ETHERTYPE_VLAN:
1684 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1685 break;
1686
1687 default:
1688 /*
1689 * Don't support this protocol or encapsulation.
1690 */
1691 *fieldsp = 0;
1692 *cmdp = 0;
1693 return (0);
1694 }
1695
1696 if ((m0->m_pkthdr.csum_flags &
1697 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1698 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1699 } else {
1700 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1701 }
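/*
 * IPCSE is the inclusive offset of the last byte covered by the
 * IP checksum, i.e. the final byte of the IP header.
 */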
1702 ipcse = offset + iphl - 1;
1703
1704 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1705 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1706 seg = 0;
1707 fields = 0;
1708
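/*
 * For TSO the hardware replays the IP and TCP headers for each
 * segment, so the length fields must be zeroed and the TCP
 * checksum seeded with the pseudo-header sum (sans length)
 * below; the chip then fills in per-segment lengths and
 * checksums.
 */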
1709 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1710 int hlen = offset + iphl;
1711 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1712
1713 if (__predict_false(m0->m_len <
1714 (hlen + sizeof(struct tcphdr)))) {
1715 /*
1716 * TCP/IP headers are not in the first mbuf; we need
1717 * to do this the slow and painful way. Let's just
1718 * hope this doesn't happen very often.
1719 */
1720 struct tcphdr th;
1721
1722 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1723
1724 m_copydata(m0, hlen, sizeof(th), &th);
1725 if (v4) {
1726 struct ip ip;
1727
1728 m_copydata(m0, offset, sizeof(ip), &ip);
1729 ip.ip_len = 0;
1730 m_copyback(m0,
1731 offset + offsetof(struct ip, ip_len),
1732 sizeof(ip.ip_len), &ip.ip_len);
1733 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1734 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1735 } else {
1736 struct ip6_hdr ip6;
1737
1738 m_copydata(m0, offset, sizeof(ip6), &ip6);
1739 ip6.ip6_plen = 0;
1740 m_copyback(m0,
1741 offset + offsetof(struct ip6_hdr, ip6_plen),
1742 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1743 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1744 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1745 }
1746 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1747 sizeof(th.th_sum), &th.th_sum);
1748
1749 hlen += th.th_off << 2;
1750 } else {
1751 /*
1752 * TCP/IP headers are in the first mbuf; we can do
1753 * this the easy way.
1754 */
1755 struct tcphdr *th;
1756
1757 if (v4) {
1758 struct ip *ip =
1759 (void *)(mtod(m0, char *) + offset);
1760 th = (void *)(mtod(m0, char *) + hlen);
1761
1762 ip->ip_len = 0;
1763 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1764 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1765 } else {
1766 struct ip6_hdr *ip6 =
1767 (void *)(mtod(m0, char *) + offset);
1768 th = (void *)(mtod(m0, char *) + hlen);
1769
1770 ip6->ip6_plen = 0;
1771 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1772 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1773 }
1774 hlen += th->th_off << 2;
1775 }
1776
1777 if (v4) {
1778 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1779 cmdlen |= WTX_TCPIP_CMD_IP;
1780 } else {
1781 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1782 ipcse = 0;
1783 }
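/*
 * Finish the TSO setup: the low bits of cmdlen carry the TSO
 * payload length (total packet length minus headers), and the
 * segmentation word gives the header length to replicate and
 * the MSS for each segment.
 */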
1784 cmd |= WTX_TCPIP_CMD_TSE;
1785 cmdlen |= WTX_TCPIP_CMD_TSE |
1786 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1787 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1788 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1789 }
1790
1791 /*
1792 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1793 * offload feature, if we load the context descriptor, we
1794 * MUST provide valid values for IPCSS and TUCSS fields.
1795 */
1796
1797 ipcs = WTX_TCPIP_IPCSS(offset) |
1798 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1799 WTX_TCPIP_IPCSE(ipcse);
1800 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1801 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1802 fields |= WTX_IXSM;
1803 }
1804
1805 offset += iphl;
1806
1807 if (m0->m_pkthdr.csum_flags &
1808 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1809 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1810 fields |= WTX_TXSM;
1811 tucs = WTX_TCPIP_TUCSS(offset) |
1812 WTX_TCPIP_TUCSO(offset +
1813 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1814 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1815 } else if ((m0->m_pkthdr.csum_flags &
1816 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1817 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1818 fields |= WTX_TXSM;
1819 tucs = WTX_TCPIP_TUCSS(offset) |
1820 WTX_TCPIP_TUCSO(offset +
1821 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1822 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1823 } else {
1824 /* Just initialize it to a valid TCP context. */
1825 tucs = WTX_TCPIP_TUCSS(offset) |
1826 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1827 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1828 }
1829
1830 /* Fill in the context descriptor. */
1831 t = (struct livengood_tcpip_ctxdesc *)
1832 &sc->sc_txdescs[sc->sc_txnext];
1833 t->tcpip_ipcs = htole32(ipcs);
1834 t->tcpip_tucs = htole32(tucs);
1835 t->tcpip_cmdlen = htole32(cmdlen);
1836 t->tcpip_seg = htole32(seg);
1837 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1838
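/*
 * The context descriptor occupies a slot in the Tx ring, so
 * account for it in both the ring index and the job's
 * descriptor count.
 */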
1839 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1840 txs->txs_ndesc++;
1841
1842 *cmdp = cmd;
1843 *fieldsp = fields;
1844
1845 return (0);
1846 }
1847
1848 static void
1849 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1850 {
1851 struct mbuf *m;
1852 int i;
1853
1854 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
1855 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1856 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1857 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
1858 m->m_data, m->m_len, m->m_flags);
1859 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
1860 i, i == 1 ? "" : "s");
1861 }
1862
1863 /*
1864 * wm_82547_txfifo_stall:
1865 *
1866 * Callout used to wait for the 82547 Tx FIFO to drain,
1867 * reset the FIFO pointers, and restart packet transmission.
1868 */
1869 static void
1870 wm_82547_txfifo_stall(void *arg)
1871 {
1872 struct wm_softc *sc = arg;
1873 int s;
1874
1875 s = splnet();
1876
1877 if (sc->sc_txfifo_stall) {
1878 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1879 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1880 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1881 /*
1882 * Packets have drained. Stop transmitter, reset
1883 * FIFO pointers, restart transmitter, and kick
1884 * the packet queue.
1885 */
1886 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1887 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1888 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1889 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1890 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1891 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1892 CSR_WRITE(sc, WMREG_TCTL, tctl);
1893 CSR_WRITE_FLUSH(sc);
1894
1895 sc->sc_txfifo_head = 0;
1896 sc->sc_txfifo_stall = 0;
1897 wm_start(&sc->sc_ethercom.ec_if);
1898 } else {
1899 /*
1900 * Still waiting for packets to drain; try again in
1901 * another tick.
1902 */
1903 callout_schedule(&sc->sc_txfifo_ch, 1);
1904 }
1905 }
1906
1907 splx(s);
1908 }
1909
1910 /*
1911 * wm_82547_txfifo_bugchk:
1912 *
1913 * Check for bug condition in the 82547 Tx FIFO. We need to
1914 * prevent enqueueing a packet that would wrap around the end
1915 * of the Tx FIFO ring buffer, otherwise the chip will croak.
1916 *
1917 * We do this by checking the amount of space before the end
1918 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1919 * the Tx FIFO, wait for all remaining packets to drain, reset
1920 * the internal FIFO pointers to the beginning, and restart
1921 * transmission on the interface.
1922 */
1923 #define WM_FIFO_HDR 0x10
1924 #define WM_82547_PAD_LEN 0x3e0
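/*
 * WM_FIFO_HDR is the granularity (and per-packet overhead) of the
 * internal Tx FIFO accounting.  A stall is needed when at least
 * WM_82547_PAD_LEN bytes of a packet would wrap past the end of the
 * FIFO; the constants match the Linux e1000 driver's E1000_FIFO_HDR
 * and E1000_82547_PAD_LEN.
 */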
1925 static int
1926 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1927 {
1928 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1929 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1930
1931 /* Just return if already stalled. */
1932 if (sc->sc_txfifo_stall)
1933 return (1);
1934
1935 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1936 /* Stall only occurs in half-duplex mode. */
1937 goto send_packet;
1938 }
1939
1940 if (len >= WM_82547_PAD_LEN + space) {
1941 sc->sc_txfifo_stall = 1;
1942 callout_schedule(&sc->sc_txfifo_ch, 1);
1943 return (1);
1944 }
1945
1946 send_packet:
1947 sc->sc_txfifo_head += len;
1948 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1949 sc->sc_txfifo_head -= sc->sc_txfifo_size;
1950
1951 return (0);
1952 }
1953
1954 /*
1955 * wm_start: [ifnet interface function]
1956 *
1957 * Start packet transmission on the interface.
1958 */
1959 static void
1960 wm_start(struct ifnet *ifp)
1961 {
1962 struct wm_softc *sc = ifp->if_softc;
1963 struct mbuf *m0;
1964 #if 0 /* XXXJRT */
1965 struct m_tag *mtag;
1966 #endif
1967 struct wm_txsoft *txs;
1968 bus_dmamap_t dmamap;
1969 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
1970 bus_addr_t curaddr;
1971 bus_size_t seglen, curlen;
1972 uint32_t cksumcmd;
1973 uint8_t cksumfields;
1974
1975 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1976 return;
1977
1978 /*
1979 * Remember the previous number of free descriptors.
1980 */
1981 ofree = sc->sc_txfree;
1982
1983 /*
1984 * Loop through the send queue, setting up transmit descriptors
1985 * until we drain the queue, or use up all available transmit
1986 * descriptors.
1987 */
1988 for (;;) {
1989 /* Grab a packet off the queue. */
1990 IFQ_POLL(&ifp->if_snd, m0);
1991 if (m0 == NULL)
1992 break;
1993
1994 DPRINTF(WM_DEBUG_TX,
1995 ("%s: TX: have packet to transmit: %p\n",
1996 sc->sc_dev.dv_xname, m0));
1997
1998 /* Get a work queue entry. */
1999 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2000 wm_txintr(sc);
2001 if (sc->sc_txsfree == 0) {
2002 DPRINTF(WM_DEBUG_TX,
2003 ("%s: TX: no free job descriptors\n",
2004 sc->sc_dev.dv_xname));
2005 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2006 break;
2007 }
2008 }
2009
2010 txs = &sc->sc_txsoft[sc->sc_txsnext];
2011 dmamap = txs->txs_dmamap;
2012
2013 use_tso = (m0->m_pkthdr.csum_flags &
2014 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2015
2016 /*
2017 * So says the Linux driver:
2018 * The controller does a simple calculation to make sure
2019 * there is enough room in the FIFO before initiating the
2020 * DMA for each buffer. The calc is:
2021 * 4 = ceil(buffer len / MSS)
2022 * To make sure we don't overrun the FIFO, adjust the max
2023 * buffer len if the MSS drops.
2024 */
2025 dmamap->dm_maxsegsz =
2026 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2027 ? m0->m_pkthdr.segsz << 2
2028 : WTX_MAX_LEN;
2029
2030 /*
2031 * Load the DMA map. If this fails, the packet either
2032 * didn't fit in the allotted number of segments, or we
2033 * were short on resources. For the too-many-segments
2034 * case, we simply report an error and drop the packet,
2035 * since we can't sanely copy a jumbo packet to a single
2036 * buffer.
2037 */
2038 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2039 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2040 if (error) {
2041 if (error == EFBIG) {
2042 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2043 log(LOG_ERR, "%s: Tx packet consumes too many "
2044 "DMA segments, dropping...\n",
2045 sc->sc_dev.dv_xname);
2046 IFQ_DEQUEUE(&ifp->if_snd, m0);
2047 wm_dump_mbuf_chain(sc, m0);
2048 m_freem(m0);
2049 continue;
2050 }
2051 /*
2052 * Short on resources, just stop for now.
2053 */
2054 DPRINTF(WM_DEBUG_TX,
2055 ("%s: TX: dmamap load failed: %d\n",
2056 sc->sc_dev.dv_xname, error));
2057 break;
2058 }
2059
2060 segs_needed = dmamap->dm_nsegs;
2061 if (use_tso) {
2062 /* For sentinel descriptor; see below. */
2063 segs_needed++;
2064 }
2065
2066 /*
2067 * Ensure we have enough descriptors free to describe
2068 * the packet. Note, we always reserve one descriptor
2069 * at the end of the ring due to the semantics of the
2070 * TDT register, plus one more in the event we need
2071 * to load offload context.
2072 */
2073 if (segs_needed > sc->sc_txfree - 2) {
2074 /*
2075 * Not enough free descriptors to transmit this
2076 * packet. We haven't committed anything yet,
2077 * so just unload the DMA map, put the packet
2078 * back on the queue, and punt. Notify the upper
2079 * layer that there are no more slots left.
2080 */
2081 DPRINTF(WM_DEBUG_TX,
2082 ("%s: TX: need %d (%d) descriptors, have %d\n",
2083 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
2084 sc->sc_txfree - 1));
2085 ifp->if_flags |= IFF_OACTIVE;
2086 bus_dmamap_unload(sc->sc_dmat, dmamap);
2087 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2088 break;
2089 }
2090
2091 /*
2092 * Check for 82547 Tx FIFO bug. We need to do this
2093 * once we know we can transmit the packet, since we
2094 * do some internal FIFO space accounting here.
2095 */
2096 if (sc->sc_type == WM_T_82547 &&
2097 wm_82547_txfifo_bugchk(sc, m0)) {
2098 DPRINTF(WM_DEBUG_TX,
2099 ("%s: TX: 82547 Tx FIFO bug detected\n",
2100 sc->sc_dev.dv_xname));
2101 ifp->if_flags |= IFF_OACTIVE;
2102 bus_dmamap_unload(sc->sc_dmat, dmamap);
2103 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2104 break;
2105 }
2106
2107 IFQ_DEQUEUE(&ifp->if_snd, m0);
2108
2109 /*
2110 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2111 */
2112
2113 DPRINTF(WM_DEBUG_TX,
2114 ("%s: TX: packet has %d (%d) DMA segments\n",
2115 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
2116
2117 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2118
2119 /*
2120 * Store a pointer to the packet so that we can free it
2121 * later.
2122 *
2123 * Initially, we consider the number of descriptors the
2124 * packet uses to be the number of DMA segments. This may be
2125 * incremented by 1 if we do checksum offload (a descriptor
2126 * is used to set the checksum context).
2127 */
2128 txs->txs_mbuf = m0;
2129 txs->txs_firstdesc = sc->sc_txnext;
2130 txs->txs_ndesc = segs_needed;
2131
2132 /* Set up offload parameters for this packet. */
2133 if (m0->m_pkthdr.csum_flags &
2134 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2135 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2136 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2137 if (wm_tx_offload(sc, txs, &cksumcmd,
2138 &cksumfields) != 0) {
2139 /* Error message already displayed. */
2140 bus_dmamap_unload(sc->sc_dmat, dmamap);
2141 continue;
2142 }
2143 } else {
2144 cksumcmd = 0;
2145 cksumfields = 0;
2146 }
2147
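/*
 * Every data descriptor asks for a delayed interrupt
 * (WTX_CMD_IDE) and for the chip to insert the Ethernet FCS
 * (WTX_CMD_IFCS).
 */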
2148 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2149
2150 /* Sync the DMA map. */
2151 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2152 BUS_DMASYNC_PREWRITE);
2153
2154 /*
2155 * Initialize the transmit descriptor.
2156 */
2157 for (nexttx = sc->sc_txnext, seg = 0;
2158 seg < dmamap->dm_nsegs; seg++) {
2159 for (seglen = dmamap->dm_segs[seg].ds_len,
2160 curaddr = dmamap->dm_segs[seg].ds_addr;
2161 seglen != 0;
2162 curaddr += curlen, seglen -= curlen,
2163 nexttx = WM_NEXTTX(sc, nexttx)) {
2164 curlen = seglen;
2165
2166 /*
2167 * So says the Linux driver:
2168 * Work around for premature descriptor
2169 * write-backs in TSO mode. Append a
2170 * 4-byte sentinel descriptor.
2171 */
2172 if (use_tso &&
2173 seg == dmamap->dm_nsegs - 1 &&
2174 curlen > 8)
2175 curlen -= 4;
2176
2177 wm_set_dma_addr(
2178 &sc->sc_txdescs[nexttx].wtx_addr,
2179 curaddr);
2180 sc->sc_txdescs[nexttx].wtx_cmdlen =
2181 htole32(cksumcmd | curlen);
2182 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2183 0;
2184 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2185 cksumfields;
2186 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2187 lasttx = nexttx;
2188
2189 DPRINTF(WM_DEBUG_TX,
2190 ("%s: TX: desc %d: low 0x%08lx, "
2191 "len 0x%04x\n",
2192 sc->sc_dev.dv_xname, nexttx,
2193 curaddr & 0xffffffffUL, (unsigned)curlen));
2194 }
2195 }
2196
2197 KASSERT(lasttx != -1);
2198
2199 /*
2200 * Set up the command byte on the last descriptor of
2201 * the packet. If we're in the interrupt delay window,
2202 * delay the interrupt.
2203 */
2204 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2205 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2206
2207 #if 0 /* XXXJRT */
2208 /*
2209 * If VLANs are enabled and the packet has a VLAN tag, set
2210 * up the descriptor to encapsulate the packet for us.
2211 *
2212 * This is only valid on the last descriptor of the packet.
2213 */
2214 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2215 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2216 htole32(WTX_CMD_VLE);
2217 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2218 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2219 }
2220 #endif /* XXXJRT */
2221
2222 txs->txs_lastdesc = lasttx;
2223
2224 DPRINTF(WM_DEBUG_TX,
2225 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
2226 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2227
2228 /* Sync the descriptors we're using. */
2229 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2230 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2231
2232 /* Give the packet to the chip. */
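/*
 * Writing the tail pointer hands every descriptor up to (but
 * not including) this index to the hardware.
 */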
2233 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2234
2235 DPRINTF(WM_DEBUG_TX,
2236 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
2237
2238 DPRINTF(WM_DEBUG_TX,
2239 ("%s: TX: finished transmitting packet, job %d\n",
2240 sc->sc_dev.dv_xname, sc->sc_txsnext));
2241
2242 /* Advance the tx pointer. */
2243 sc->sc_txfree -= txs->txs_ndesc;
2244 sc->sc_txnext = nexttx;
2245
2246 sc->sc_txsfree--;
2247 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2248
2249 #if NBPFILTER > 0
2250 /* Pass the packet to any BPF listeners. */
2251 if (ifp->if_bpf)
2252 bpf_mtap(ifp->if_bpf, m0);
2253 #endif /* NBPFILTER > 0 */
2254 }
2255
2256 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2257 /* No more slots; notify upper layer. */
2258 ifp->if_flags |= IFF_OACTIVE;
2259 }
2260
2261 if (sc->sc_txfree != ofree) {
2262 /* Set a watchdog timer in case the chip flakes out. */
2263 ifp->if_timer = 5;
2264 }
2265 }
2266
2267 /*
2268 * wm_watchdog: [ifnet interface function]
2269 *
2270 * Watchdog timer handler.
2271 */
2272 static void
2273 wm_watchdog(struct ifnet *ifp)
2274 {
2275 struct wm_softc *sc = ifp->if_softc;
2276
2277 /*
2278 * Since we're using delayed interrupts, sweep up
2279 * before we report an error.
2280 */
2281 wm_txintr(sc);
2282
2283 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2284 log(LOG_ERR,
2285 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2286 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
2287 sc->sc_txnext);
2288 ifp->if_oerrors++;
2289
2290 /* Reset the interface. */
2291 (void) wm_init(ifp);
2292 }
2293
2294 /* Try to get more packets going. */
2295 wm_start(ifp);
2296 }
2297
2298 /*
2299 * wm_ioctl: [ifnet interface function]
2300 *
2301 * Handle control requests from the operator.
2302 */
2303 static int
2304 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2305 {
2306 struct wm_softc *sc = ifp->if_softc;
2307 struct ifreq *ifr = (struct ifreq *) data;
2308 int s, error;
2309
2310 s = splnet();
2311
2312 switch (cmd) {
2313 case SIOCSIFMEDIA:
2314 case SIOCGIFMEDIA:
2315 /* Flow control requires full-duplex mode. */
2316 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2317 (ifr->ifr_media & IFM_FDX) == 0)
2318 ifr->ifr_media &= ~IFM_ETH_FMASK;
2319 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2320 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2321 /* We can do both TXPAUSE and RXPAUSE. */
2322 ifr->ifr_media |=
2323 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2324 }
2325 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2326 }
2327 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2328 break;
2329 default:
2330 error = ether_ioctl(ifp, cmd, data);
2331 if (error == ENETRESET) {
2332 /*
2333 * Multicast list has changed; set the hardware filter
2334 * accordingly.
2335 */
2336 if (ifp->if_flags & IFF_RUNNING)
2337 wm_set_filter(sc);
2338 error = 0;
2339 }
2340 break;
2341 }
2342
2343 /* Try to get more packets going. */
2344 wm_start(ifp);
2345
2346 splx(s);
2347 return (error);
2348 }
2349
2350 /*
2351 * wm_intr:
2352 *
2353 * Interrupt service routine.
2354 */
2355 static int
2356 wm_intr(void *arg)
2357 {
2358 struct wm_softc *sc = arg;
2359 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2360 uint32_t icr;
2361 int handled = 0;
2362
2363 while (1 /* CONSTCOND */) {
2364 icr = CSR_READ(sc, WMREG_ICR);
2365 if ((icr & sc->sc_icr) == 0)
2366 break;
2367 #if 0 /*NRND > 0*/
2368 if (RND_ENABLED(&sc->rnd_source))
2369 rnd_add_uint32(&sc->rnd_source, icr);
2370 #endif
2371
2372 handled = 1;
2373
2374 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2375 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2376 DPRINTF(WM_DEBUG_RX,
2377 ("%s: RX: got Rx intr 0x%08x\n",
2378 sc->sc_dev.dv_xname,
2379 icr & (ICR_RXDMT0|ICR_RXT0)));
2380 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2381 }
2382 #endif
2383 wm_rxintr(sc);
2384
2385 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2386 if (icr & ICR_TXDW) {
2387 DPRINTF(WM_DEBUG_TX,
2388 ("%s: TX: got TXDW interrupt\n",
2389 sc->sc_dev.dv_xname));
2390 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2391 }
2392 #endif
2393 wm_txintr(sc);
2394
2395 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2396 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2397 wm_linkintr(sc, icr);
2398 }
2399
2400 if (icr & ICR_RXO) {
2401 ifp->if_ierrors++;
2402 #if defined(WM_DEBUG)
2403 log(LOG_WARNING, "%s: Receive overrun\n",
2404 sc->sc_dev.dv_xname);
2405 #endif /* defined(WM_DEBUG) */
2406 }
2407 }
2408
2409 if (handled) {
2410 /* Try to get more packets going. */
2411 wm_start(ifp);
2412 }
2413
2414 return (handled);
2415 }
2416
2417 /*
2418 * wm_txintr:
2419 *
2420 * Helper; handle transmit interrupts.
2421 */
2422 static void
2423 wm_txintr(struct wm_softc *sc)
2424 {
2425 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2426 struct wm_txsoft *txs;
2427 uint8_t status;
2428 int i;
2429
2430 ifp->if_flags &= ~IFF_OACTIVE;
2431
2432 /*
2433 * Go through the Tx list and free mbufs for those
2434 * frames which have been transmitted.
2435 */
2436 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2437 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2438 txs = &sc->sc_txsoft[i];
2439
2440 DPRINTF(WM_DEBUG_TX,
2441 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
2442
2443 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2444 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2445
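/*
 * The chip sets WTX_ST_DD (descriptor done) when it has written
 * the descriptor back; if it is still clear, the job is in
 * flight, so re-sync the descriptor for a later look.
 */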
2446 status =
2447 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2448 if ((status & WTX_ST_DD) == 0) {
2449 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2450 BUS_DMASYNC_PREREAD);
2451 break;
2452 }
2453
2454 DPRINTF(WM_DEBUG_TX,
2455 ("%s: TX: job %d done: descs %d..%d\n",
2456 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
2457 txs->txs_lastdesc));
2458
2459 /*
2460 * XXX We should probably be using the statistics
2461 * XXX registers, but I don't know if they exist
2462 * XXX on chips before the i82544.
2463 */
2464
2465 #ifdef WM_EVENT_COUNTERS
2466 if (status & WTX_ST_TU)
2467 WM_EVCNT_INCR(&sc->sc_ev_tu);
2468 #endif /* WM_EVENT_COUNTERS */
2469
2470 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2471 ifp->if_oerrors++;
2472 if (status & WTX_ST_LC)
2473 log(LOG_WARNING, "%s: late collision\n",
2474 sc->sc_dev.dv_xname);
2475 else if (status & WTX_ST_EC) {
2476 ifp->if_collisions += 16;
2477 log(LOG_WARNING, "%s: excessive collisions\n",
2478 sc->sc_dev.dv_xname);
2479 }
2480 } else
2481 ifp->if_opackets++;
2482
2483 sc->sc_txfree += txs->txs_ndesc;
2484 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2485 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2486 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2487 m_freem(txs->txs_mbuf);
2488 txs->txs_mbuf = NULL;
2489 }
2490
2491 /* Update the dirty transmit buffer pointer. */
2492 sc->sc_txsdirty = i;
2493 DPRINTF(WM_DEBUG_TX,
2494 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2495
2496 /*
2497 * If there are no more pending transmissions, cancel the watchdog
2498 * timer.
2499 */
2500 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2501 ifp->if_timer = 0;
2502 }
2503
2504 /*
2505 * wm_rxintr:
2506 *
2507 * Helper; handle receive interrupts.
2508 */
2509 static void
2510 wm_rxintr(struct wm_softc *sc)
2511 {
2512 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2513 struct wm_rxsoft *rxs;
2514 struct mbuf *m;
2515 int i, len;
2516 uint8_t status, errors;
2517
2518 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2519 rxs = &sc->sc_rxsoft[i];
2520
2521 DPRINTF(WM_DEBUG_RX,
2522 ("%s: RX: checking descriptor %d\n",
2523 sc->sc_dev.dv_xname, i));
2524
2525 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2526
2527 status = sc->sc_rxdescs[i].wrx_status;
2528 errors = sc->sc_rxdescs[i].wrx_errors;
2529 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2530
2531 if ((status & WRX_ST_DD) == 0) {
2532 /*
2533 * We have processed all of the receive descriptors.
2534 */
2535 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2536 break;
2537 }
2538
2539 if (__predict_false(sc->sc_rxdiscard)) {
2540 DPRINTF(WM_DEBUG_RX,
2541 ("%s: RX: discarding contents of descriptor %d\n",
2542 sc->sc_dev.dv_xname, i));
2543 WM_INIT_RXDESC(sc, i);
2544 if (status & WRX_ST_EOP) {
2545 /* Reset our state. */
2546 DPRINTF(WM_DEBUG_RX,
2547 ("%s: RX: resetting rxdiscard -> 0\n",
2548 sc->sc_dev.dv_xname));
2549 sc->sc_rxdiscard = 0;
2550 }
2551 continue;
2552 }
2553
2554 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2555 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2556
2557 m = rxs->rxs_mbuf;
2558
2559 /*
2560 * Add a new receive buffer to the ring, unless of
2561 * course the length is zero. Treat the latter as a
2562 * failed mapping.
2563 */
2564 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2565 /*
2566 * Failed, throw away what we've done so
2567 * far, and discard the rest of the packet.
2568 */
2569 ifp->if_ierrors++;
2570 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2571 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2572 WM_INIT_RXDESC(sc, i);
2573 if ((status & WRX_ST_EOP) == 0)
2574 sc->sc_rxdiscard = 1;
2575 if (sc->sc_rxhead != NULL)
2576 m_freem(sc->sc_rxhead);
2577 WM_RXCHAIN_RESET(sc);
2578 DPRINTF(WM_DEBUG_RX,
2579 ("%s: RX: Rx buffer allocation failed, "
2580 "dropping packet%s\n", sc->sc_dev.dv_xname,
2581 sc->sc_rxdiscard ? " (discard)" : ""));
2582 continue;
2583 }
2584
2585 WM_RXCHAIN_LINK(sc, m);
2586
2587 m->m_len = len;
2588
2589 DPRINTF(WM_DEBUG_RX,
2590 ("%s: RX: buffer at %p len %d\n",
2591 sc->sc_dev.dv_xname, m->m_data, len));
2592
2593 /*
2594 * If this is not the end of the packet, keep
2595 * looking.
2596 */
2597 if ((status & WRX_ST_EOP) == 0) {
2598 sc->sc_rxlen += len;
2599 DPRINTF(WM_DEBUG_RX,
2600 ("%s: RX: not yet EOP, rxlen -> %d\n",
2601 sc->sc_dev.dv_xname, sc->sc_rxlen));
2602 continue;
2603 }
2604
2605 /*
2606 * Okay, we have the entire packet now. The chip is
2607 * configured to include the FCS (not all chips can
2608 * be configured to strip it), so we need to trim it.
2609 */
2610 m->m_len -= ETHER_CRC_LEN;
2611
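/*
 * Terminate the mbuf chain and compute the total packet length
 * from this fragment plus the lengths accumulated in sc_rxlen
 * by earlier (non-EOP) fragments.
 */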
2612 *sc->sc_rxtailp = NULL;
2613 len = m->m_len + sc->sc_rxlen;
2614 m = sc->sc_rxhead;
2615
2616 WM_RXCHAIN_RESET(sc);
2617
2618 DPRINTF(WM_DEBUG_RX,
2619 ("%s: RX: have entire packet, len -> %d\n",
2620 sc->sc_dev.dv_xname, len));
2621
2622 /*
2623 * If an error occurred, update stats and drop the packet.
2624 */
2625 if (errors &
2626 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2627 ifp->if_ierrors++;
2628 if (errors & WRX_ER_SE)
2629 log(LOG_WARNING, "%s: symbol error\n",
2630 sc->sc_dev.dv_xname);
2631 else if (errors & WRX_ER_SEQ)
2632 log(LOG_WARNING, "%s: receive sequence error\n",
2633 sc->sc_dev.dv_xname);
2634 else if (errors & WRX_ER_CE)
2635 log(LOG_WARNING, "%s: CRC error\n",
2636 sc->sc_dev.dv_xname);
2637 m_freem(m);
2638 continue;
2639 }
2640
2641 /*
2642 * No errors. Receive the packet.
2643 */
2644 m->m_pkthdr.rcvif = ifp;
2645 m->m_pkthdr.len = len;
2646
2647 #if 0 /* XXXJRT */
2648 /*
2649 * If VLANs are enabled, VLAN packets have been unwrapped
2650 * for us. Associate the tag with the packet.
2651 */
2652 if ((status & WRX_ST_VP) != 0) {
2653 VLAN_INPUT_TAG(ifp, m,
2654 le16toh(sc->sc_rxdescs[i].wrx_special),
2655 continue);
2656 }
2657 #endif /* XXXJRT */
2658
2659 /*
2660 * Set up checksum info for this packet.
2661 */
2662 if ((status & WRX_ST_IXSM) == 0) {
2663 if (status & WRX_ST_IPCS) {
2664 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2665 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2666 if (errors & WRX_ER_IPE)
2667 m->m_pkthdr.csum_flags |=
2668 M_CSUM_IPv4_BAD;
2669 }
2670 if (status & WRX_ST_TCPCS) {
2671 /*
2672 * Note: we don't know if this was TCP or UDP,
2673 * so we just set both bits, and expect the
2674 * upper layers to deal.
2675 */
2676 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2677 m->m_pkthdr.csum_flags |=
2678 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2679 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2680 if (errors & WRX_ER_TCPE)
2681 m->m_pkthdr.csum_flags |=
2682 M_CSUM_TCP_UDP_BAD;
2683 }
2684 }
2685
2686 ifp->if_ipackets++;
2687
2688 #if NBPFILTER > 0
2689 /* Pass this up to any BPF listeners. */
2690 if (ifp->if_bpf)
2691 bpf_mtap(ifp->if_bpf, m);
2692 #endif /* NBPFILTER > 0 */
2693
2694 /* Pass it on. */
2695 (*ifp->if_input)(ifp, m);
2696 }
2697
2698 /* Update the receive pointer. */
2699 sc->sc_rxptr = i;
2700
2701 DPRINTF(WM_DEBUG_RX,
2702 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2703 }
2704
2705 /*
2706 * wm_linkintr:
2707 *
2708 * Helper; handle link interrupts.
2709 */
2710 static void
2711 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2712 {
2713 uint32_t status;
2714
2715 /*
2716 * If we get a link status interrupt on a 1000BASE-T
2717 * device, just fall into the normal MII tick path.
2718 */
2719 if (sc->sc_flags & WM_F_HAS_MII) {
2720 if (icr & ICR_LSC) {
2721 DPRINTF(WM_DEBUG_LINK,
2722 ("%s: LINK: LSC -> mii_tick\n",
2723 sc->sc_dev.dv_xname));
2724 mii_tick(&sc->sc_mii);
2725 } else if (icr & ICR_RXSEQ) {
2726 DPRINTF(WM_DEBUG_LINK,
2727 ("%s: LINK: Receive sequence error\n",
2728 sc->sc_dev.dv_xname));
2729 }
2730 return;
2731 }
2732
2733 /*
2734 * If we are now receiving /C/, check for link again in
2735 * a couple of link clock ticks.
2736 */
2737 if (icr & ICR_RXCFG) {
2738 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2739 sc->sc_dev.dv_xname));
2740 sc->sc_tbi_anstate = 2;
2741 }
2742
2743 if (icr & ICR_LSC) {
2744 status = CSR_READ(sc, WMREG_STATUS);
2745 if (status & STATUS_LU) {
2746 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2747 sc->sc_dev.dv_xname,
2748 (status & STATUS_FD) ? "FDX" : "HDX"));
2749 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2750 sc->sc_fcrtl &= ~FCRTL_XONE;
2751 if (status & STATUS_FD)
2752 sc->sc_tctl |=
2753 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2754 else
2755 sc->sc_tctl |=
2756 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2757 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2758 sc->sc_fcrtl |= FCRTL_XONE;
2759 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2760 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2761 WMREG_OLD_FCRTL : WMREG_FCRTL,
2762 sc->sc_fcrtl);
2763 sc->sc_tbi_linkup = 1;
2764 } else {
2765 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2766 sc->sc_dev.dv_xname));
2767 sc->sc_tbi_linkup = 0;
2768 }
2769 sc->sc_tbi_anstate = 2;
2770 wm_tbi_set_linkled(sc);
2771 } else if (icr & ICR_RXSEQ) {
2772 DPRINTF(WM_DEBUG_LINK,
2773 ("%s: LINK: Receive sequence error\n",
2774 sc->sc_dev.dv_xname));
2775 }
2776 }
2777
2778 /*
2779 * wm_tick:
2780 *
2781 * One second timer, used to check link status, sweep up
2782 * completed transmit jobs, etc.
2783 */
2784 static void
2785 wm_tick(void *arg)
2786 {
2787 struct wm_softc *sc = arg;
2788 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2789 int s;
2790
2791 s = splnet();
2792
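/*
 * These statistics registers are clear-on-read, so fold them
 * into the event counters once per tick.
 */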
2793 if (sc->sc_type >= WM_T_82542_2_1) {
2794 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2795 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2796 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2797 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2798 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2799 }
2800
2801 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2802 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2803
2805 if (sc->sc_flags & WM_F_HAS_MII)
2806 mii_tick(&sc->sc_mii);
2807 else
2808 wm_tbi_check_link(sc);
2809
2810 splx(s);
2811
2812 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2813 }
2814
2815 /*
2816 * wm_reset:
2817 *
2818 * Reset the i82542 chip.
2819 */
2820 static void
2821 wm_reset(struct wm_softc *sc)
2822 {
2823 uint32_t reg;
2824
2825 /*
2826 * Allocate on-chip memory according to the MTU size.
2827 * The Packet Buffer Allocation register must be written
2828 * before the chip is reset.
2829 */
2830 switch (sc->sc_type) {
2831 case WM_T_82547:
2832 case WM_T_82547_2:
2833 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2834 PBA_22K : PBA_30K;
2835 sc->sc_txfifo_head = 0;
2836 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2837 sc->sc_txfifo_size =
2838 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2839 sc->sc_txfifo_stall = 0;
2840 break;
2841 case WM_T_82571:
2842 case WM_T_82572:
2843 case WM_T_80003:
2844 sc->sc_pba = PBA_32K;
2845 break;
2846 case WM_T_82573:
2847 sc->sc_pba = PBA_12K;
2848 break;
2849 case WM_T_ICH8:
2850 sc->sc_pba = PBA_8K;
2851 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2852 break;
2853 case WM_T_ICH9:
2854 sc->sc_pba = PBA_10K;
2855 break;
2856 default:
2857 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2858 PBA_40K : PBA_48K;
2859 break;
2860 }
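/*
 * The PBA value is the receive share of the on-chip packet
 * buffer; the transmit FIFO gets the remainder (as reflected
 * in the 82547 Tx FIFO bookkeeping above).
 */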
2861 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2862
2863 if (sc->sc_flags & WM_F_PCIE) {
2864 int timeout = 800;
2865
2866 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2867 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2868
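/*
 * Wait up to 80 ms (800 * 100 us) for any pending PCIe master
 * transactions to finish before resetting.
 */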
2869 while (timeout--) {
2870 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2871 break;
2872 delay(100);
2873 }
2874 }
2875
2876 /* clear interrupt */
2877 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2878
2879 /*
2880 * 82541 Errata 29? & 82547 Errata 28?
2881 * See also the description about PHY_RST bit in CTRL register
2882 * in 8254x_GBe_SDM.pdf.
2883 */
2884 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2885 CSR_WRITE(sc, WMREG_CTRL,
2886 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2887 delay(5000);
2888 }
2889
2890 switch (sc->sc_type) {
2891 case WM_T_82544:
2892 case WM_T_82540:
2893 case WM_T_82545:
2894 case WM_T_82546:
2895 case WM_T_82541:
2896 case WM_T_82541_2:
2897 /*
2898 * On some chipsets, a reset through a memory-mapped write
2899 * cycle can cause the chip to reset before completing the
2900 * write cycle. This causes major headaches that can be
2901 * avoided by issuing the reset via indirect register writes
2902 * through I/O space.
2903 *
2904 * So, if we successfully mapped the I/O BAR at attach time,
2905 * use that. Otherwise, try our luck with a memory-mapped
2906 * reset.
2907 */
2908 if (sc->sc_flags & WM_F_IOH_VALID)
2909 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2910 else
2911 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2912 break;
2913
2914 case WM_T_82545_3:
2915 case WM_T_82546_3:
2916 /* Use the shadow control register on these chips. */
2917 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2918 break;
2919
2920 case WM_T_ICH8:
2921 case WM_T_ICH9:
2922 wm_get_swfwhw_semaphore(sc);
2923 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2924 delay(10000);
break;
2925
2926 default:
2927 /* Everything else can safely use the documented method. */
2928 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2929 break;
2930 }
2931 delay(10000);
2932
2933 /* reload EEPROM */
2934 switch (sc->sc_type) {
2935 case WM_T_82542_2_0:
2936 case WM_T_82542_2_1:
2937 case WM_T_82543:
2938 case WM_T_82544:
2939 delay(10);
2940 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2942 delay(2000);
2943 break;
2944 case WM_T_82541:
2945 case WM_T_82541_2:
2946 case WM_T_82547:
2947 case WM_T_82547_2:
2948 delay(20000);
2949 break;
2950 case WM_T_82573:
2951 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
2952 delay(10);
2953 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2954 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2955 }
2956 /* FALLTHROUGH */
2957 default:
2958 /* check EECD_EE_AUTORD */
2959 wm_get_auto_rd_done(sc);
2960 }
2961
2962 #if 0
2963 for (i = 0; i < 1000; i++) {
2964 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
2965 return;
2966 }
2967 delay(20);
2968 }
2969
2970 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2971 log(LOG_ERR, "%s: reset failed to complete\n",
2972 sc->sc_dev.dv_xname);
2973 #endif
2974 }
2975
2976 /*
2977 * wm_init: [ifnet interface function]
2978 *
2979 * Initialize the interface. Must be called at splnet().
2980 */
2981 static int
2982 wm_init(struct ifnet *ifp)
2983 {
2984 struct wm_softc *sc = ifp->if_softc;
2985 struct wm_rxsoft *rxs;
2986 int i, error = 0;
2987 uint32_t reg;
2988
2989 /*
2990 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2991 * There is a small but measurable benefit to avoiding the adjustment
2992 * of the descriptor so that the headers are aligned, for normal mtu,
2993 * on such platforms. One possibility is that the DMA itself is
2994 * slightly more efficient if the front of the entire packet (instead
2995 * of the front of the headers) is aligned.
2996 *
2997 * Note we must always set align_tweak to 0 if we are using
2998 * jumbo frames.
2999 */
3000 #ifdef __NO_STRICT_ALIGNMENT
3001 sc->sc_align_tweak = 0;
3002 #else
3003 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3004 sc->sc_align_tweak = 0;
3005 else
3006 sc->sc_align_tweak = 2;
3007 #endif /* __NO_STRICT_ALIGNMENT */
3008
3009 /* Cancel any pending I/O. */
3010 wm_stop(ifp, 0);
3011
3012 /* update statistics before reset */
3013 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3014 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3015
3016 /* Reset the chip to a known state. */
3017 wm_reset(sc);
3018
3019 /* Initialize the transmit descriptor ring. */
3020 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3021 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3022 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3023 sc->sc_txfree = WM_NTXDESC(sc);
3024 sc->sc_txnext = 0;
3025
3026 if (sc->sc_type < WM_T_82543) {
3027 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3028 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3029 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3030 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3031 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3032 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3033 } else {
3034 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3035 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3036 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3037 CSR_WRITE(sc, WMREG_TDH, 0);
3038 CSR_WRITE(sc, WMREG_TDT, 0);
3039 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3040 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3041
3042 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3043 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3044 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3045 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3046 }
3047 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3048 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3049
3050 /* Initialize the transmit job descriptors. */
3051 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3052 sc->sc_txsoft[i].txs_mbuf = NULL;
3053 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3054 sc->sc_txsnext = 0;
3055 sc->sc_txsdirty = 0;
3056
3057 /*
3058 * Initialize the receive descriptor and receive job
3059 * descriptor rings.
3060 */
3061 if (sc->sc_type < WM_T_82543) {
3062 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3063 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3064 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3065 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3066 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3067 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3068
3069 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3070 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3071 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3072 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3073 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3074 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3075 } else {
3076 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3077 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3078 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3079 CSR_WRITE(sc, WMREG_RDH, 0);
3080 CSR_WRITE(sc, WMREG_RDT, 0);
3081 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3082 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3083 }
3084 for (i = 0; i < WM_NRXDESC; i++) {
3085 rxs = &sc->sc_rxsoft[i];
3086 if (rxs->rxs_mbuf == NULL) {
3087 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3088 log(LOG_ERR, "%s: unable to allocate or map rx "
3089 "buffer %d, error = %d\n",
3090 sc->sc_dev.dv_xname, i, error);
3091 /*
3092 * XXX Should attempt to run with fewer receive
3093 * XXX buffers instead of just failing.
3094 */
3095 wm_rxdrain(sc);
3096 goto out;
3097 }
3098 } else
3099 WM_INIT_RXDESC(sc, i);
3100 }
3101 sc->sc_rxptr = 0;
3102 sc->sc_rxdiscard = 0;
3103 WM_RXCHAIN_RESET(sc);
3104
3105 /*
3106 * Clear out the VLAN table -- we don't use it (yet).
3107 */
3108 CSR_WRITE(sc, WMREG_VET, 0);
3109 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3110 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3111
3112 /*
3113 * Set up flow-control parameters.
3114 *
3115 * XXX Values could probably stand some tuning.
3116 */
3117 if (sc->sc_type != WM_T_ICH8) {
3118 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3119 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3120 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3121 }
3122
3123 sc->sc_fcrtl = FCRTL_DFLT;
3124 if (sc->sc_type < WM_T_82543) {
3125 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3126 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3127 } else {
3128 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3129 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3130 }
3131 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3132
3133 #if 0 /* XXXJRT */
3134 /* Deal with VLAN enables. */
3135 if (VLAN_ATTACHED(&sc->sc_ethercom))
3136 sc->sc_ctrl |= CTRL_VME;
3137 else
3138 #endif /* XXXJRT */
3139 sc->sc_ctrl &= ~CTRL_VME;
3140
3141 /* Write the control registers. */
3142 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3143 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3144 int val;
3145 val = CSR_READ(sc, WMREG_CTRL_EXT);
3146 val &= ~CTRL_EXT_LINK_MODE_MASK;
3147 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3148
3149 /* Bypass RX and TX FIFO's */
3150 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3151 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3152 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3153
3154 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3155 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3156 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3157 /*
3158 * Set the mac to wait the maximum time between each
3159 * iteration and increase the max iterations when
3160 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3161 */
3162 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3163 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3164 val |= 0x3F;
3165 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3166 }
3167 #if 0
3168 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3169 #endif
3170
3171 /*
3172 * Set up checksum offload parameters.
3173 */
3174 reg = CSR_READ(sc, WMREG_RXCSUM);
3175 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3176 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3177 reg |= RXCSUM_IPOFL;
3178 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3179 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3180 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3181 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3182 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3183
3184 /*
3185 * Set up the interrupt registers.
3186 */
3187 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3188 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3189 ICR_RXO | ICR_RXT0;
3190 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3191 sc->sc_icr |= ICR_RXCFG;
3192 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3193
3194 /* Set up the inter-packet gap. */
3195 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3196
3197 if (sc->sc_type >= WM_T_82543) {
3198 /*
3199 * Set up the interrupt throttling register (units of 256ns)
3200 * Note that a footnote in Intel's documentation says this
3201 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3202 * or 10Mbit mode. Empirically, the same appears to be true
3203 * for the 1024ns units of the other interrupt-related timer
3204 * registers -- so, really, we ought
3205 * to divide this value by 4 when the link speed is low.
3206 *
3207 * XXX implement this division at link speed change!
3208 */
3209
3210 /*
3211 * For N interrupts/sec, set this value to:
3212 * 1000000000 / (N * 256). Note that we set the
3213 * absolute and packet timer values to this value
3214 * divided by 4 to get "simple timer" behavior.
3215 */
3216
3217 sc->sc_itr = 1500; /* 2604 ints/sec */
3218 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3219 }
3220
3221 #if 0 /* XXXJRT */
3222 /* Set the VLAN ethernetype. */
3223 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3224 #endif
3225
3226 /*
3227 * Set up the transmit control register; we start out with
3228 * a collision distance suitable for FDX, but update it when
3229 * we resolve the media type.
3230 */
3231 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3232 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3233 if (sc->sc_type >= WM_T_82571)
3234 sc->sc_tctl |= TCTL_MULR;
3235 if (sc->sc_type >= WM_T_80003)
3236 sc->sc_tctl |= TCTL_RTLC;
3237 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3238
3239 /* Set the media. */
3240 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3241 goto out;
3242
3243 /*
3244 * Set up the receive control register; we actually program
3245 * the register when we set the receive filter. Use multicast
3246 * address offset type 0.
3247 *
3248 * Only the i82544 has the ability to strip the incoming
3249 * CRC, so we don't enable that feature.
3250 */
3251 sc->sc_mchash_type = 0;
3252 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3253 | RCTL_MO(sc->sc_mchash_type);
3254
3255 /* 82573 doesn't support jumbo frame */
3256 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3257 sc->sc_rctl |= RCTL_LPE;
3258
3259 if (MCLBYTES == 2048) {
3260 sc->sc_rctl |= RCTL_2k;
3261 } else {
3262 if (sc->sc_type >= WM_T_82543) {
3263 switch (MCLBYTES) {
3264 case 4096:
3265 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3266 break;
3267 case 8192:
3268 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3269 break;
3270 case 16384:
3271 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3272 break;
3273 default:
3274 panic("wm_init: MCLBYTES %d unsupported",
3275 MCLBYTES);
3276 break;
3277 }
3278 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3279 }
3280
3281 /* Set the receive filter. */
3282 wm_set_filter(sc);
3283
3284 /* Start the one second link check clock. */
3285 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3286
3287 /* ...all done! */
3288 ifp->if_flags |= IFF_RUNNING;
3289 ifp->if_flags &= ~IFF_OACTIVE;
3290
3291 out:
3292 if (error)
3293 log(LOG_ERR, "%s: interface not running\n",
3294 sc->sc_dev.dv_xname);
3295 return (error);
3296 }
3297
3298 /*
3299 * wm_rxdrain:
3300 *
3301 * Drain the receive queue.
3302 */
3303 static void
3304 wm_rxdrain(struct wm_softc *sc)
3305 {
3306 struct wm_rxsoft *rxs;
3307 int i;
3308
3309 for (i = 0; i < WM_NRXDESC; i++) {
3310 rxs = &sc->sc_rxsoft[i];
3311 if (rxs->rxs_mbuf != NULL) {
3312 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3313 m_freem(rxs->rxs_mbuf);
3314 rxs->rxs_mbuf = NULL;
3315 }
3316 }
3317 }
3318
3319 /*
3320 * wm_stop: [ifnet interface function]
3321 *
3322 * Stop transmission on the interface.
3323 */
3324 static void
3325 wm_stop(struct ifnet *ifp, int disable)
3326 {
3327 struct wm_softc *sc = ifp->if_softc;
3328 struct wm_txsoft *txs;
3329 int i;
3330
3331 /* Stop the one second clock. */
3332 callout_stop(&sc->sc_tick_ch);
3333
3334 /* Stop the 82547 Tx FIFO stall check timer. */
3335 if (sc->sc_type == WM_T_82547)
3336 callout_stop(&sc->sc_txfifo_ch);
3337
3338 if (sc->sc_flags & WM_F_HAS_MII) {
3339 /* Down the MII. */
3340 mii_down(&sc->sc_mii);
3341 }
3342
3343 /* Stop the transmit and receive processes. */
3344 CSR_WRITE(sc, WMREG_TCTL, 0);
3345 CSR_WRITE(sc, WMREG_RCTL, 0);
3346
3347 /*
3348 * Clear the interrupt mask to ensure the device cannot assert its
3349 * interrupt line.
3350 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3351 * any currently pending or shared interrupt.
3352 */
3353 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3354 sc->sc_icr = 0;
3355
3356 /* Release any queued transmit buffers. */
3357 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3358 txs = &sc->sc_txsoft[i];
3359 if (txs->txs_mbuf != NULL) {
3360 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3361 m_freem(txs->txs_mbuf);
3362 txs->txs_mbuf = NULL;
3363 }
3364 }
3365
3366 if (disable)
3367 wm_rxdrain(sc);
3368
3369 /* Mark the interface as down and cancel the watchdog timer. */
3370 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3371 ifp->if_timer = 0;
3372 }
3373
3374 void
3375 wm_get_auto_rd_done(struct wm_softc *sc)
3376 {
3377 int i;
3378
3379 /* wait for eeprom to reload */
3380 switch (sc->sc_type) {
3381 case WM_T_82571:
3382 case WM_T_82572:
3383 case WM_T_82573:
3384 case WM_T_80003:
3385 case WM_T_ICH8:
3386 case WM_T_ICH9:
3387 for (i = 10; i > 0; i--) {
3388 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3389 break;
3390 delay(1000);
3391 }
3392 if (i == 0) {
3393 log(LOG_ERR, "%s: auto read from eeprom failed to "
3394 "complete\n", sc->sc_dev.dv_xname);
3395 }
3396 break;
3397 default:
3398 delay(5000);
3399 break;
3400 }
3401
3402 /* Phy configuration starts after EECD_AUTO_RD is set */
3403 if (sc->sc_type == WM_T_82573)
3404 delay(25000);
3405 }
3406
3407 /*
3408 * wm_acquire_eeprom:
3409 *
3410 * Perform the EEPROM handshake required on some chips.
3411 */
3412 static int
3413 wm_acquire_eeprom(struct wm_softc *sc)
3414 {
3415 uint32_t reg;
3416 int x;
3417 int ret = 0;
3418
3419 /* always succeeds */
3420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3421 return 0;
3422
3423 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3424 ret = wm_get_swfwhw_semaphore(sc);
3425 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3426 /* this will also do wm_get_swsm_semaphore() if needed */
3427 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3428 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3429 ret = wm_get_swsm_semaphore(sc);
3430 }
3431
3432 if (ret)
3433 return 1;
3434
3435 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3436 reg = CSR_READ(sc, WMREG_EECD);
3437
3438 /* Request EEPROM access. */
3439 reg |= EECD_EE_REQ;
3440 CSR_WRITE(sc, WMREG_EECD, reg);
3441
3442 /* ..and wait for it to be granted. */
3443 for (x = 0; x < 1000; x++) {
3444 reg = CSR_READ(sc, WMREG_EECD);
3445 if (reg & EECD_EE_GNT)
3446 break;
3447 delay(5);
3448 }
3449 if ((reg & EECD_EE_GNT) == 0) {
3450 aprint_error("%s: could not acquire EEPROM GNT\n",
3451 sc->sc_dev.dv_xname);
3452 reg &= ~EECD_EE_REQ;
3453 CSR_WRITE(sc, WMREG_EECD, reg);
3454 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3455 wm_put_swfwhw_semaphore(sc);
3456 if (sc->sc_flags & WM_F_SWFW_SYNC)
3457 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3458 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3459 wm_put_swsm_semaphore(sc);
3460 return (1);
3461 }
3462 }
3463
3464 return (0);
3465 }
3466
3467 /*
3468 * wm_release_eeprom:
3469 *
3470 * Release the EEPROM mutex.
3471 */
3472 static void
3473 wm_release_eeprom(struct wm_softc *sc)
3474 {
3475 uint32_t reg;
3476
3477 /* always succeeds */
3478 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3479 return;
3480
3481 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3482 reg = CSR_READ(sc, WMREG_EECD);
3483 reg &= ~EECD_EE_REQ;
3484 CSR_WRITE(sc, WMREG_EECD, reg);
3485 }
3486
3487 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3488 wm_put_swfwhw_semaphore(sc);
3489 if (sc->sc_flags & WM_F_SWFW_SYNC)
3490 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3491 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3492 wm_put_swsm_semaphore(sc);
3493 }
3494
3495 /*
3496 * wm_eeprom_sendbits:
3497 *
3498 * Send a series of bits to the EEPROM.
3499 */
3500 static void
3501 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3502 {
3503 uint32_t reg;
3504 int x;
3505
3506 reg = CSR_READ(sc, WMREG_EECD);
3507
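/*
 * Clock the bits out MSB-first: present each bit on DI, then
 * pulse SK high and low with a 2 usec setup/hold time.
 */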
3508 for (x = nbits; x > 0; x--) {
3509 if (bits & (1U << (x - 1)))
3510 reg |= EECD_DI;
3511 else
3512 reg &= ~EECD_DI;
3513 CSR_WRITE(sc, WMREG_EECD, reg);
3514 delay(2);
3515 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3516 delay(2);
3517 CSR_WRITE(sc, WMREG_EECD, reg);
3518 delay(2);
3519 }
3520 }
3521
3522 /*
3523 * wm_eeprom_recvbits:
3524 *
3525 * Receive a series of bits from the EEPROM.
3526 */
3527 static void
3528 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3529 {
3530 uint32_t reg, val;
3531 int x;
3532
3533 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3534
3535 val = 0;
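/* Clock the bits in MSB-first, sampling DO while SK is high. */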
3536 for (x = nbits; x > 0; x--) {
3537 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3538 delay(2);
3539 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3540 val |= (1U << (x - 1));
3541 CSR_WRITE(sc, WMREG_EECD, reg);
3542 delay(2);
3543 }
3544 *valp = val;
3545 }
3546
3547 /*
3548 * wm_read_eeprom_uwire:
3549 *
3550 * Read a word from the EEPROM using the MicroWire protocol.
3551 */
3552 static int
3553 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3554 {
3555 uint32_t reg, val;
3556 int i;
3557
3558 for (i = 0; i < wordcnt; i++) {
3559 /* Clear SK and DI. */
3560 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3561 CSR_WRITE(sc, WMREG_EECD, reg);
3562
3563 /* Set CHIP SELECT. */
3564 reg |= EECD_CS;
3565 CSR_WRITE(sc, WMREG_EECD, reg);
3566 delay(2);
3567
3568 /* Shift in the READ command. */
3569 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3570
3571 /* Shift in address. */
3572 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3573
3574 /* Shift out the data. */
3575 wm_eeprom_recvbits(sc, &val, 16);
3576 data[i] = val & 0xffff;
3577
3578 /* Clear CHIP SELECT. */
3579 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3580 CSR_WRITE(sc, WMREG_EECD, reg);
3581 delay(2);
3582 }
3583
3584 return (0);
3585 }
3586
3587 /*
3588 * wm_spi_eeprom_ready:
3589 *
3590 * Wait for a SPI EEPROM to be ready for commands.
3591 */
3592 static int
3593 wm_spi_eeprom_ready(struct wm_softc *sc)
3594 {
3595 uint32_t val;
3596 int usec;
3597
3598 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3599 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3600 wm_eeprom_recvbits(sc, &val, 8);
3601 if ((val & SPI_SR_RDY) == 0)
3602 break;
3603 }
3604 if (usec >= SPI_MAX_RETRIES) {
3605 aprint_error("%s: EEPROM failed to become ready\n",
3606 sc->sc_dev.dv_xname);
3607 return (1);
3608 }
3609 return (0);
3610 }
3611
3612 /*
3613 * wm_read_eeprom_spi:
3614 *
3615 * Read a word from the EEPROM using the SPI protocol.
3616 */
3617 static int
3618 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3619 {
3620 uint32_t reg, val;
3621 int i;
3622 uint8_t opc;
3623
3624 /* Clear SK and CS. */
3625 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3626 CSR_WRITE(sc, WMREG_EECD, reg);
3627 delay(2);
3628
3629 if (wm_spi_eeprom_ready(sc))
3630 return (1);
3631
3632 /* Toggle CS to flush commands. */
3633 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3634 delay(2);
3635 CSR_WRITE(sc, WMREG_EECD, reg);
3636 delay(2);
3637
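/*
 * Small SPI parts with 8-bit addressing encode address bit 8 in
 * the opcode (SPI_OPC_A8); words at or above 128 have byte
 * addresses >= 256 and so need it.
 */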
3638 opc = SPI_OPC_READ;
3639 if (sc->sc_ee_addrbits == 8 && word >= 128)
3640 opc |= SPI_OPC_A8;
3641
3642 wm_eeprom_sendbits(sc, opc, 8);
3643 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3644
3645 for (i = 0; i < wordcnt; i++) {
3646 wm_eeprom_recvbits(sc, &val, 16);
3647 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3648 }
3649
3650 /* Raise CS and clear SK. */
3651 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3652 CSR_WRITE(sc, WMREG_EECD, reg);
3653 delay(2);
3654
3655 return (0);
3656 }
3657
3658 #define EEPROM_CHECKSUM 0xBABA
3659 #define EEPROM_SIZE 0x0040
3660
3661 /*
3662 * wm_validate_eeprom_checksum
3663 *
3664 * The checksum is defined as the sum of the first 64 (16 bit) words.
3665 */
3666 static int
3667 wm_validate_eeprom_checksum(struct wm_softc *sc)
3668 {
3669 uint16_t checksum;
3670 uint16_t eeprom_data;
3671 int i;
3672
3673 checksum = 0;
3674
3675 for (i = 0; i < EEPROM_SIZE; i++) {
3676 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3677 return 1;
3678 checksum += eeprom_data;
3679 }
3680
3681 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3682 return 1;
3683
3684 return 0;
3685 }
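
/*
 * Illustrative note: the sum wraps modulo 2^16 (it is accumulated in a
 * uint16_t), so the checksum word stored in the EEPROM is chosen as
 * EEPROM_CHECKSUM minus the sum of the other 63 words.  A minimal
 * sketch (not compiled; "wm_eeprom_csum_word" is a hypothetical helper,
 * not part of this driver) of how such a word would be computed:
 */
#if 0
static uint16_t
wm_eeprom_csum_word(const uint16_t *image)
{
	uint16_t sum = 0;
	int i;

	/* Sum the first 63 words; the last word is the checksum itself. */
	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += image[i];

	/* mod-2^16 arithmetic makes the full 64-word sum EEPROM_CHECKSUM. */
	return (EEPROM_CHECKSUM - sum);
}
#endif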
3686
3687 /*
3688 * wm_read_eeprom:
3689 *
3690 * Read data from the serial EEPROM.
3691 */
3692 static int
3693 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3694 {
3695 int rv;
3696
3697 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3698 return 1;
3699
3700 if (wm_acquire_eeprom(sc))
3701 return 1;
3702
3703 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3704 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3705 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3706 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3707 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3708 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3709 else
3710 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3711
3712 wm_release_eeprom(sc);
3713 return rv;
3714 }
3715
3716 static int
3717 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3718 uint16_t *data)
3719 {
3720 int i, eerd = 0;
3721 int error = 0;
3722
3723 for (i = 0; i < wordcnt; i++) {
3724 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3725
3726 CSR_WRITE(sc, WMREG_EERD, eerd);
3727 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3728 if (error != 0)
3729 break;
3730
3731 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3732 }
3733
3734 return error;
3735 }
3736
3737 static int
3738 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3739 {
3740 uint32_t attempts = 100000;
3741 uint32_t i, reg = 0;
3742 int32_t done = -1;
3743
3744 for (i = 0; i < attempts; i++) {
3745 reg = CSR_READ(sc, rw);
3746
3747 if (reg & EERD_DONE) {
3748 done = 0;
3749 break;
3750 }
3751 delay(5);
3752 }
3753
3754 return done;
3755 }
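
/*
 * Illustrative note: the poll above retries 100000 times with a 5 us
 * delay per iteration, i.e. a worst-case busy-wait of about half a
 * second before the EERD/EEWR access is abandoned.
 */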
3756
3757 /*
3758 * wm_add_rxbuf:
3759 *
3760 * Add a receive buffer to the indicated descriptor.
3761 */
3762 static int
3763 wm_add_rxbuf(struct wm_softc *sc, int idx)
3764 {
3765 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3766 struct mbuf *m;
3767 int error;
3768
3769 MGETHDR(m, M_DONTWAIT, MT_DATA);
3770 if (m == NULL)
3771 return (ENOBUFS);
3772
3773 MCLGET(m, M_DONTWAIT);
3774 if ((m->m_flags & M_EXT) == 0) {
3775 m_freem(m);
3776 return (ENOBUFS);
3777 }
3778
3779 if (rxs->rxs_mbuf != NULL)
3780 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3781
3782 rxs->rxs_mbuf = m;
3783
3784 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3785 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3786 BUS_DMA_READ|BUS_DMA_NOWAIT);
3787 if (error) {
3788 /* XXX XXX XXX */
3789 printf("%s: unable to load rx DMA map %d, error = %d\n",
3790 sc->sc_dev.dv_xname, idx, error);
3791 panic("wm_add_rxbuf");
3792 }
3793
3794 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3795 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3796
3797 WM_INIT_RXDESC(sc, idx);
3798
3799 return (0);
3800 }
3801
3802 /*
3803 * wm_set_ral:
3804 *
3805 * Set an entry in the receive address list.
3806 */
3807 static void
3808 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3809 {
3810 uint32_t ral_lo, ral_hi;
3811
3812 if (enaddr != NULL) {
3813 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3814 (enaddr[3] << 24);
3815 ral_hi = enaddr[4] | (enaddr[5] << 8);
3816 ral_hi |= RAL_AV;
3817 } else {
3818 ral_lo = 0;
3819 ral_hi = 0;
3820 }
3821
3822 if (sc->sc_type >= WM_T_82544) {
3823 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3824 ral_lo);
3825 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3826 ral_hi);
3827 } else {
3828 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3829 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3830 }
3831 }
3832
3833 /*
3834 * wm_mchash:
3835 *
3836 * Compute the hash of the multicast address for the 4096-bit
3837 * multicast filter.
3838 */
3839 static uint32_t
3840 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3841 {
3842 static const int lo_shift[4] = { 4, 3, 2, 0 };
3843 static const int hi_shift[4] = { 4, 5, 6, 8 };
3844 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3845 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3846 uint32_t hash;
3847
3848 if (sc->sc_type == WM_T_ICH8) {
3849 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3850 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3851 return (hash & 0x3ff);
3852 }
3853 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3854 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3855
3856 return (hash & 0xfff);
3857 }
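
/*
 * Worked example (illustrative): with sc_mchash_type == 0 and an
 * address whose last two bytes are enaddr[4] == 0x4c and
 * enaddr[5] == 0x29, hash = (0x4c >> 4) | (0x29 << 4) = 0x294.
 * wm_set_filter() then splits this 12-bit value into an MTA word
 * index (hash >> 5 == 0x14) and a bit number (hash & 0x1f == 0x14).
 */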
3858
3859 /*
3860 * wm_set_filter:
3861 *
3862 * Set up the receive filter.
3863 */
3864 static void
3865 wm_set_filter(struct wm_softc *sc)
3866 {
3867 struct ethercom *ec = &sc->sc_ethercom;
3868 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3869 struct ether_multi *enm;
3870 struct ether_multistep step;
3871 bus_addr_t mta_reg;
3872 uint32_t hash, reg, bit;
3873 int i, size;
3874
3875 if (sc->sc_type >= WM_T_82544)
3876 mta_reg = WMREG_CORDOVA_MTA;
3877 else
3878 mta_reg = WMREG_MTA;
3879
3880 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3881
3882 if (ifp->if_flags & IFF_BROADCAST)
3883 sc->sc_rctl |= RCTL_BAM;
3884 if (ifp->if_flags & IFF_PROMISC) {
3885 sc->sc_rctl |= RCTL_UPE;
3886 goto allmulti;
3887 }
3888
3889 /*
3890 * Set the station address in the first RAL slot, and
3891 * clear the remaining slots.
3892 */
3893 if (sc->sc_type == WM_T_ICH8)
3894 size = WM_ICH8_RAL_TABSIZE;
3895 else
3896 size = WM_RAL_TABSIZE;
3897 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3898 for (i = 1; i < size; i++)
3899 wm_set_ral(sc, NULL, i);
3900
3901 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3902 size = WM_ICH8_MC_TABSIZE;
3903 else
3904 size = WM_MC_TABSIZE;
3905 /* Clear out the multicast table. */
3906 for (i = 0; i < size; i++)
3907 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3908
3909 ETHER_FIRST_MULTI(step, ec, enm);
3910 while (enm != NULL) {
3911 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3912 /*
3913 * We must listen to a range of multicast addresses.
3914 * For now, just accept all multicasts, rather than
3915 * trying to set only those filter bits needed to match
3916 * the range. (At this time, the only use of address
3917 * ranges is for IP multicast routing, for which the
3918 * range is big enough to require all bits set.)
3919 */
3920 goto allmulti;
3921 }
3922
3923 hash = wm_mchash(sc, enm->enm_addrlo);
3924
3925 reg = (hash >> 5);
3926 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3927 reg &= 0x1f;
3928 else
3929 reg &= 0x7f;
3930 bit = hash & 0x1f;
3931
3932 hash = CSR_READ(sc, mta_reg + (reg << 2));
3933 hash |= 1U << bit;
3934
3935 /* XXX Hardware bug?? */
3936 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3937 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3938 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3939 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3940 } else
3941 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3942
3943 ETHER_NEXT_MULTI(step, enm);
3944 }
3945
3946 ifp->if_flags &= ~IFF_ALLMULTI;
3947 goto setit;
3948
3949 allmulti:
3950 ifp->if_flags |= IFF_ALLMULTI;
3951 sc->sc_rctl |= RCTL_MPE;
3952
3953 setit:
3954 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3955 }
3956
3957 /*
3958 * wm_tbi_mediainit:
3959 *
3960 * Initialize media for use on 1000BASE-X devices.
3961 */
3962 static void
3963 wm_tbi_mediainit(struct wm_softc *sc)
3964 {
3965 const char *sep = "";
3966
3967 if (sc->sc_type < WM_T_82543)
3968 sc->sc_tipg = TIPG_WM_DFLT;
3969 else
3970 sc->sc_tipg = TIPG_LG_DFLT;
3971
3972 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3973 wm_tbi_mediastatus);
3974
3975 /*
3976 * SWD Pins:
3977 *
3978 * 0 = Link LED (output)
3979 * 1 = Loss Of Signal (input)
3980 */
3981 sc->sc_ctrl |= CTRL_SWDPIO(0);
3982 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3983
3984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3985
3986 #define ADD(ss, mm, dd) \
3987 do { \
3988 aprint_normal("%s%s", sep, ss); \
3989 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3990 sep = ", "; \
3991 } while (/*CONSTCOND*/0)
3992
3993 aprint_normal("%s: ", sc->sc_dev.dv_xname);
3994 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3995 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3996 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3997 aprint_normal("\n");
3998
3999 #undef ADD
4000
4001 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4002 }
4003
4004 /*
4005 * wm_tbi_mediastatus: [ifmedia interface function]
4006 *
4007 * Get the current interface media status on a 1000BASE-X device.
4008 */
4009 static void
4010 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4011 {
4012 struct wm_softc *sc = ifp->if_softc;
4013 uint32_t ctrl;
4014
4015 ifmr->ifm_status = IFM_AVALID;
4016 ifmr->ifm_active = IFM_ETHER;
4017
4018 if (sc->sc_tbi_linkup == 0) {
4019 ifmr->ifm_active |= IFM_NONE;
4020 return;
4021 }
4022
4023 ifmr->ifm_status |= IFM_ACTIVE;
4024 ifmr->ifm_active |= IFM_1000_SX;
4025 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4026 ifmr->ifm_active |= IFM_FDX;
4027 ctrl = CSR_READ(sc, WMREG_CTRL);
4028 if (ctrl & CTRL_RFCE)
4029 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4030 if (ctrl & CTRL_TFCE)
4031 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4032 }
4033
4034 /*
4035 * wm_tbi_mediachange: [ifmedia interface function]
4036 *
4037 * Set hardware to newly-selected media on a 1000BASE-X device.
4038 */
4039 static int
4040 wm_tbi_mediachange(struct ifnet *ifp)
4041 {
4042 struct wm_softc *sc = ifp->if_softc;
4043 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4044 uint32_t status;
4045 int i;
4046
4047 sc->sc_txcw = ife->ifm_data;
4048 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4049 sc->sc_dev.dv_xname,sc->sc_txcw));
4050 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4051 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4052 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4053 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4054 sc->sc_txcw |= TXCW_ANE;
4055 } else {
4056 /* If autonegotiation is turned off, force link up and full duplex. */
4057 sc->sc_txcw &= ~TXCW_ANE;
4058 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4059 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4060 delay(1000);
4061 }
4062
4063 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4064 sc->sc_dev.dv_xname,sc->sc_txcw));
4065 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4066 delay(10000);
4067
4068 /* NOTE: CTRL will update TFCE and RFCE automatically. */
4069
4070 sc->sc_tbi_anstate = 0;
4071
4072 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4073 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));
4074
4075 /*
4076 * On 82544 and later chips, CTRL_SWDPIN(1) is set if the optics detect
4077 * a signal; on older chips the pin is Loss Of Signal, so 0 means signal.
4078 */
4079 if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4080 /* Have signal; wait for the link to come up. */
4081
4082 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4083 /*
4084 * Reset the link, and let autonegotiation do its thing
4085 */
4086 sc->sc_ctrl |= CTRL_LRST;
4087 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4088 delay(1000);
4089 sc->sc_ctrl &= ~CTRL_LRST;
4090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4091 delay(1000);
4092 }
4093
4094 for (i = 0; i < 50; i++) {
4095 delay(10000);
4096 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4097 break;
4098 }
4099
4100 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4101 sc->sc_dev.dv_xname,i));
4102
4103 status = CSR_READ(sc, WMREG_STATUS);
4104 DPRINTF(WM_DEBUG_LINK,
4105 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4106 sc->sc_dev.dv_xname,status, STATUS_LU));
4107 if (status & STATUS_LU) {
4108 /* Link is up. */
4109 DPRINTF(WM_DEBUG_LINK,
4110 ("%s: LINK: set media -> link up %s\n",
4111 sc->sc_dev.dv_xname,
4112 (status & STATUS_FD) ? "FDX" : "HDX"));
4113 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4114 sc->sc_fcrtl &= ~FCRTL_XONE;
4115 if (status & STATUS_FD)
4116 sc->sc_tctl |=
4117 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4118 else
4119 sc->sc_tctl |=
4120 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4121 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4122 sc->sc_fcrtl |= FCRTL_XONE;
4123 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4124 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4125 WMREG_OLD_FCRTL : WMREG_FCRTL,
4126 sc->sc_fcrtl);
4127 sc->sc_tbi_linkup = 1;
4128 } else {
4129 /* Link is down. */
4130 DPRINTF(WM_DEBUG_LINK,
4131 ("%s: LINK: set media -> link down\n",
4132 sc->sc_dev.dv_xname));
4133 sc->sc_tbi_linkup = 0;
4134 }
4135 } else {
4136 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4137 sc->sc_dev.dv_xname));
4138 sc->sc_tbi_linkup = 0;
4139 }
4140
4141 wm_tbi_set_linkled(sc);
4142
4143 return (0);
4144 }
4145
4146 /*
4147 * wm_tbi_set_linkled:
4148 *
4149 * Update the link LED on 1000BASE-X devices.
4150 */
4151 static void
4152 wm_tbi_set_linkled(struct wm_softc *sc)
4153 {
4154
4155 if (sc->sc_tbi_linkup)
4156 sc->sc_ctrl |= CTRL_SWDPIN(0);
4157 else
4158 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4159
4160 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4161 }
4162
4163 /*
4164 * wm_tbi_check_link:
4165 *
4166 * Check the link on 1000BASE-X devices.
4167 */
4168 static void
4169 wm_tbi_check_link(struct wm_softc *sc)
4170 {
4171 uint32_t rxcw, ctrl, status;
4172
4173 if (sc->sc_tbi_anstate == 0)
4174 return;
4175 else if (sc->sc_tbi_anstate > 1) {
4176 DPRINTF(WM_DEBUG_LINK,
4177 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
4178 sc->sc_tbi_anstate));
4179 sc->sc_tbi_anstate--;
4180 return;
4181 }
4182
4183 sc->sc_tbi_anstate = 0;
4184
4185 rxcw = CSR_READ(sc, WMREG_RXCW);
4186 ctrl = CSR_READ(sc, WMREG_CTRL);
4187 status = CSR_READ(sc, WMREG_STATUS);
4188
4189 if ((status & STATUS_LU) == 0) {
4190 DPRINTF(WM_DEBUG_LINK,
4191 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
4192 sc->sc_tbi_linkup = 0;
4193 } else {
4194 DPRINTF(WM_DEBUG_LINK,
4195 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
4196 (status & STATUS_FD) ? "FDX" : "HDX"));
4197 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4198 sc->sc_fcrtl &= ~FCRTL_XONE;
4199 if (status & STATUS_FD)
4200 sc->sc_tctl |=
4201 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4202 else
4203 sc->sc_tctl |=
4204 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4205 if (ctrl & CTRL_TFCE)
4206 sc->sc_fcrtl |= FCRTL_XONE;
4207 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4208 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4209 WMREG_OLD_FCRTL : WMREG_FCRTL,
4210 sc->sc_fcrtl);
4211 sc->sc_tbi_linkup = 1;
4212 }
4213
4214 wm_tbi_set_linkled(sc);
4215 }
4216
4217 /*
4218 * wm_gmii_reset:
4219 *
4220 * Reset the PHY.
4221 */
4222 static void
4223 wm_gmii_reset(struct wm_softc *sc)
4224 {
4225 uint32_t reg;
4226 int func = 0; /* XXX gcc */
4227
4228 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4229 if (wm_get_swfwhw_semaphore(sc))
4230 return;
4231 }
4232 if (sc->sc_type == WM_T_80003) {
4233 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4234 if (wm_get_swfw_semaphore(sc,
4235 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4236 return;
4237 }
4238 if (sc->sc_type >= WM_T_82544) {
4239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4240 delay(20000);
4241
4242 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4243 delay(20000);
4244 } else {
4245 /*
4246 * With 82543, we need to force speed and duplex on the MAC
4247 * equal to what the PHY speed and duplex configuration is.
4248 * In addition, we need to perform a hardware reset on the PHY
4249 * to take it out of reset.
4250 */
4251 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4252 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4253
4254 /* The PHY reset pin is active-low. */
4255 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4256 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4257 CTRL_EXT_SWDPIN(4));
4258 reg |= CTRL_EXT_SWDPIO(4);
4259
4260 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4261 delay(10);
4262
4263 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4264 delay(10000);
4265
4266 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4267 delay(10);
4268 #if 0
4269 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4270 #endif
4271 }
4272 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4273 wm_put_swfwhw_semaphore(sc);
4274 if (sc->sc_type == WM_T_80003)
4275 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4276 }
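
/*
 * Illustrative note on the pre-82544 path above: SWDPIN(4) drives the
 * PHY's active-low reset pin, so the write sequence is high (deassert),
 * low for 10 ms (assert reset), then high again to release the PHY.
 */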
4277
4278 /*
4279 * wm_gmii_mediainit:
4280 *
4281 * Initialize media for use on 1000BASE-T devices.
4282 */
4283 static void
4284 wm_gmii_mediainit(struct wm_softc *sc)
4285 {
4286 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4287
4288 /* We have MII. */
4289 sc->sc_flags |= WM_F_HAS_MII;
4290
4291 if (sc->sc_type >= WM_T_80003)
4292 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4293 else
4294 sc->sc_tipg = TIPG_1000T_DFLT;
4295
4296 /*
4297 * Let the chip set speed/duplex on its own based on
4298 * signals from the PHY.
4299 * XXXbouyer - I'm not sure this is right for the 80003,
4300 * the em driver only sets CTRL_SLU here - but it seems to work.
4301 */
4302 sc->sc_ctrl |= CTRL_SLU;
4303 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4304
4305 /* Initialize our media structures and probe the GMII. */
4306 sc->sc_mii.mii_ifp = ifp;
4307
4308 if (sc->sc_type >= WM_T_80003) {
4309 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4310 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4311 } else if (sc->sc_type >= WM_T_82544) {
4312 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4313 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4314 } else {
4315 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4316 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4317 }
4318 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4319
4320 wm_gmii_reset(sc);
4321
4322 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4323 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4324 wm_gmii_mediastatus);
4325
4326 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4327 MII_OFFSET_ANY, MIIF_DOPAUSE);
4328 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4329 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4330 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4331 } else
4332 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4333 }
4334
4335 /*
4336 * wm_gmii_mediastatus: [ifmedia interface function]
4337 *
4338 * Get the current interface media status on a 1000BASE-T device.
4339 */
4340 static void
4341 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4342 {
4343 struct wm_softc *sc = ifp->if_softc;
4344
4345 ether_mediastatus(ifp, ifmr);
4346 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4347 sc->sc_flowflags;
4348 }
4349
4350 /*
4351 * wm_gmii_mediachange: [ifmedia interface function]
4352 *
4353 * Set hardware to newly-selected media on a 1000BASE-T device.
4354 */
4355 static int
4356 wm_gmii_mediachange(struct ifnet *ifp)
4357 {
4358 struct wm_softc *sc = ifp->if_softc;
4359 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4360 int rc;
4361
4362 if ((ifp->if_flags & IFF_UP) == 0)
4363 return 0;
4364
4365 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4366 sc->sc_ctrl |= CTRL_SLU;
4367 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4368 || (sc->sc_type > WM_T_82543)) {
4369 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4370 } else {
4371 sc->sc_ctrl &= ~CTRL_ASDE;
4372 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4373 if (ife->ifm_media & IFM_FDX)
4374 sc->sc_ctrl |= CTRL_FD;
4375 switch (IFM_SUBTYPE(ife->ifm_media)) {
4376 case IFM_10_T:
4377 sc->sc_ctrl |= CTRL_SPEED_10;
4378 break;
4379 case IFM_100_TX:
4380 sc->sc_ctrl |= CTRL_SPEED_100;
4381 break;
4382 case IFM_1000_T:
4383 sc->sc_ctrl |= CTRL_SPEED_1000;
4384 break;
4385 default:
4386 panic("wm_gmii_mediachange: bad media 0x%x",
4387 ife->ifm_media);
4388 }
4389 }
4390 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4391 if (sc->sc_type <= WM_T_82543)
4392 wm_gmii_reset(sc);
4393
4394 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4395 return 0;
4396 return rc;
4397 }
4398
4399 #define MDI_IO CTRL_SWDPIN(2)
4400 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4401 #define MDI_CLK CTRL_SWDPIN(3)
4402
4403 static void
4404 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4405 {
4406 uint32_t i, v;
4407
4408 v = CSR_READ(sc, WMREG_CTRL);
4409 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4410 v |= MDI_DIR | CTRL_SWDPIO(3);
4411
4412 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4413 if (data & i)
4414 v |= MDI_IO;
4415 else
4416 v &= ~MDI_IO;
4417 CSR_WRITE(sc, WMREG_CTRL, v);
4418 delay(10);
4419 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4420 delay(10);
4421 CSR_WRITE(sc, WMREG_CTRL, v);
4422 delay(10);
4423 }
4424 }
4425
4426 static uint32_t
4427 i82543_mii_recvbits(struct wm_softc *sc)
4428 {
4429 uint32_t v, i, data = 0;
4430
4431 v = CSR_READ(sc, WMREG_CTRL);
4432 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4433 v |= CTRL_SWDPIO(3);
4434
4435 CSR_WRITE(sc, WMREG_CTRL, v);
4436 delay(10);
4437 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4438 delay(10);
4439 CSR_WRITE(sc, WMREG_CTRL, v);
4440 delay(10);
4441
4442 for (i = 0; i < 16; i++) {
4443 data <<= 1;
4444 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4445 delay(10);
4446 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4447 data |= 1;
4448 CSR_WRITE(sc, WMREG_CTRL, v);
4449 delay(10);
4450 }
4451
4452 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4453 delay(10);
4454 CSR_WRITE(sc, WMREG_CTRL, v);
4455 delay(10);
4456
4457 return (data);
4458 }
4459
4460 #undef MDI_IO
4461 #undef MDI_DIR
4462 #undef MDI_CLK
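
/*
 * Illustrative note: the two helpers above bit-bang an IEEE 802.3
 * clause-22 MDIO frame on the software-definable pins.  A read sends
 * a 32-bit preamble of 1s, then the 14 bits <ST><OP><PHY[4:0]><REG[4:0]>
 * (see wm_gmii_i82543_readreg below); i82543_mii_recvbits() then clocks
 * one turnaround cycle followed by the 16 data bits.
 */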
4463
4464 /*
4465 * wm_gmii_i82543_readreg: [mii interface function]
4466 *
4467 * Read a PHY register on the GMII (i82543 version).
4468 */
4469 static int
4470 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
4471 {
4472 struct wm_softc *sc = (void *) self;
4473 int rv;
4474
4475 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4476 i82543_mii_sendbits(sc, reg | (phy << 5) |
4477 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4478 rv = i82543_mii_recvbits(sc) & 0xffff;
4479
4480 DPRINTF(WM_DEBUG_GMII,
4481 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4482 sc->sc_dev.dv_xname, phy, reg, rv));
4483
4484 return (rv);
4485 }
4486
4487 /*
4488 * wm_gmii_i82543_writereg: [mii interface function]
4489 *
4490 * Write a PHY register on the GMII (i82543 version).
4491 */
4492 static void
4493 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
4494 {
4495 struct wm_softc *sc = (void *) self;
4496
4497 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4498 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4499 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4500 (MII_COMMAND_START << 30), 32);
4501 }
4502
4503 /*
4504 * wm_gmii_i82544_readreg: [mii interface function]
4505 *
4506 * Read a PHY register on the GMII.
4507 */
4508 static int
4509 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
4510 {
4511 struct wm_softc *sc = (void *) self;
4512 uint32_t mdic = 0;
4513 int i, rv;
4514
4515 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4516 MDIC_REGADD(reg));
4517
4518 for (i = 0; i < 320; i++) {
4519 mdic = CSR_READ(sc, WMREG_MDIC);
4520 if (mdic & MDIC_READY)
4521 break;
4522 delay(10);
4523 }
4524
4525 if ((mdic & MDIC_READY) == 0) {
4526 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4527 sc->sc_dev.dv_xname, phy, reg);
4528 rv = 0;
4529 } else if (mdic & MDIC_E) {
4530 #if 0 /* This is normal if no PHY is present. */
4531 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4532 sc->sc_dev.dv_xname, phy, reg);
4533 #endif
4534 rv = 0;
4535 } else {
4536 rv = MDIC_DATA(mdic);
4537 if (rv == 0xffff)
4538 rv = 0;
4539 }
4540
4541 return (rv);
4542 }
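
/*
 * Illustrative note: the MDIC poll above allows 320 iterations of
 * 10 us, roughly 3.2 ms, before declaring a timeout.  A data value of
 * 0xffff is also treated as "no device", since that is what a pulled-up
 * MDIO bus with nothing driving it reads back.
 */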
4543
4544 /*
4545 * wm_gmii_i82544_writereg: [mii interface function]
4546 *
4547 * Write a PHY register on the GMII.
4548 */
4549 static void
4550 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
4551 {
4552 struct wm_softc *sc = (void *) self;
4553 uint32_t mdic = 0;
4554 int i;
4555
4556 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4557 MDIC_REGADD(reg) | MDIC_DATA(val));
4558
4559 for (i = 0; i < 320; i++) {
4560 mdic = CSR_READ(sc, WMREG_MDIC);
4561 if (mdic & MDIC_READY)
4562 break;
4563 delay(10);
4564 }
4565
4566 if ((mdic & MDIC_READY) == 0)
4567 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4568 sc->sc_dev.dv_xname, phy, reg);
4569 else if (mdic & MDIC_E)
4570 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4571 sc->sc_dev.dv_xname, phy, reg);
4572 }
4573
4574 /*
4575 * wm_gmii_i80003_readreg: [mii interface function]
4576 *
4577 * Read a PHY register on the Kumeran bus.
4578 * This could be handled by the PHY layer if we didn't have to lock
4579 * the resource ...
4580 */
4581 static int
4582 wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
4583 {
4584 struct wm_softc *sc = (void *) self;
4585 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4586 int rv;
4587
4588 if (phy != 1) /* only one PHY on kumeran bus */
4589 return 0;
4590
4591 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4592 return 0;
4593
4594 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4595 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4596 reg >> GG82563_PAGE_SHIFT);
4597 } else {
4598 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4599 reg >> GG82563_PAGE_SHIFT);
4600 }
4601
4602 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4603 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4604 return (rv);
4605 }
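
/*
 * Illustrative note: GG82563 register numbers encode a page in the bits
 * above GG82563_PAGE_SHIFT.  The helpers here first write the page
 * number to the page-select register (which itself lives at an
 * alternate address for the upper register range) and then access
 * "reg & GG82563_MAX_REG_ADDRESS" within that page.
 */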
4606
4607 /*
4608 * wm_gmii_i80003_writereg: [mii interface function]
4609 *
4610 * Write a PHY register on the Kumeran bus.
4611 * This could be handled by the PHY layer if we didn't have to lock
4612 * the resource ...
4613 */
4614 static void
4615 wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
4616 {
4617 struct wm_softc *sc = (void *) self;
4618 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4619
4620 if (phy != 1) /* only one PHY on kumeran bus */
4621 return;
4622
4623 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4624 return;
4625
4626 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4627 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4628 reg >> GG82563_PAGE_SHIFT);
4629 } else {
4630 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4631 reg >> GG82563_PAGE_SHIFT);
4632 }
4633
4634 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4635 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4636 }
4637
4638 /*
4639 * wm_gmii_statchg: [mii interface function]
4640 *
4641 * Callback from MII layer when media changes.
4642 */
4643 static void
4644 wm_gmii_statchg(struct device *self)
4645 {
4646 struct wm_softc *sc = (void *) self;
4647 struct mii_data *mii = &sc->sc_mii;
4648
4649 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4650 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4651 sc->sc_fcrtl &= ~FCRTL_XONE;
4652
4653 /*
4654 * Get flow control negotiation result.
4655 */
4656 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4657 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4658 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4659 mii->mii_media_active &= ~IFM_ETH_FMASK;
4660 }
4661
4662 if (sc->sc_flowflags & IFM_FLOW) {
4663 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4664 sc->sc_ctrl |= CTRL_TFCE;
4665 sc->sc_fcrtl |= FCRTL_XONE;
4666 }
4667 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4668 sc->sc_ctrl |= CTRL_RFCE;
4669 }
4670
4671 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4672 DPRINTF(WM_DEBUG_LINK,
4673 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
4674 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4675 } else {
4676 DPRINTF(WM_DEBUG_LINK,
4677 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
4678 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4679 }
4680
4681 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4682 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4683 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4684 : WMREG_FCRTL, sc->sc_fcrtl);
4685 if (sc->sc_type >= WM_T_80003) {
4686 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4687 case IFM_1000_T:
4688 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4689 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4690 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4691 break;
4692 default:
4693 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4694 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4695 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4696 break;
4697 }
4698 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4699 }
4700 }
4701
4702 /*
4703 * wm_kmrn_i80003_readreg:
4704 *
4705 * Read a Kumeran register.
4706 */
4707 static int
4708 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4709 {
4710 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4711 int rv;
4712
4713 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4714 return 0;
4715
4716 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4717 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4718 KUMCTRLSTA_REN);
4719 delay(2);
4720
4721 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4722 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4723 return (rv);
4724 }
4725
4726 /*
4727 * wm_kmrn_i80003_writereg:
4728 *
4729 * Write a Kumeran register.
4730 */
4731 static void
4732 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4733 {
4734 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4735
4736 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4737 return;
4738
4739 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4740 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4741 (val & KUMCTRLSTA_MASK));
4742 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4743 }
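
/*
 * Illustrative note: KUMCTRLSTA multiplexes both directions through one
 * register: the target register number goes in the OFFSET field,
 * KUMCTRLSTA_REN requests a read (the result shows up in the low
 * KUMCTRLSTA_MASK bits after a short delay), and a write simply
 * supplies the data in those low bits with REN clear.
 */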
4744
4745 static int
4746 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4747 {
4748 uint32_t eecd = 0;
4749
4750 if (sc->sc_type == WM_T_82573) {
4751 eecd = CSR_READ(sc, WMREG_EECD);
4752
4753 /* Isolate bits 15 & 16 */
4754 eecd = ((eecd >> 15) & 0x03);
4755
4756 /* If both bits are set, device is Flash type */
4757 if (eecd == 0x03) {
4758 return 0;
4759 }
4760 }
4761 return 1;
4762 }
4763
4764 static int
4765 wm_get_swsm_semaphore(struct wm_softc *sc)
4766 {
4767 int32_t timeout;
4768 uint32_t swsm;
4769
4770 /* Get the FW semaphore. */
4771 timeout = 1000 + 1; /* XXX */
4772 while (timeout) {
4773 swsm = CSR_READ(sc, WMREG_SWSM);
4774 swsm |= SWSM_SWESMBI;
4775 CSR_WRITE(sc, WMREG_SWSM, swsm);
4776 /* if we managed to set the bit we got the semaphore. */
4777 swsm = CSR_READ(sc, WMREG_SWSM);
4778 if (swsm & SWSM_SWESMBI)
4779 break;
4780
4781 delay(50);
4782 timeout--;
4783 }
4784
4785 if (timeout == 0) {
4786 aprint_error("%s: could not acquire EEPROM GNT\n",
4787 sc->sc_dev.dv_xname);
4788 /* Release semaphores */
4789 wm_put_swsm_semaphore(sc);
4790 return 1;
4791 }
4792 return 0;
4793 }
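
/*
 * Illustrative note: the acquire loop above uses a set-and-test-back
 * pattern: software writes SWSM_SWESMBI and reads it back; the bit only
 * sticks when firmware is not holding the semaphore, so a successful
 * read-back means we own it.
 */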
4794
4795 static void
4796 wm_put_swsm_semaphore(struct wm_softc *sc)
4797 {
4798 uint32_t swsm;
4799
4800 swsm = CSR_READ(sc, WMREG_SWSM);
4801 swsm &= ~(SWSM_SWESMBI);
4802 CSR_WRITE(sc, WMREG_SWSM, swsm);
4803 }
4804
4805 static int
4806 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4807 {
4808 uint32_t swfw_sync;
4809 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4810 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
4811 int timeout;
4812
4813 for (timeout = 0; timeout < 200; timeout++) {
4814 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4815 if (wm_get_swsm_semaphore(sc))
4816 return 1;
4817 }
4818 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4819 if ((swfw_sync & (swmask | fwmask)) == 0) {
4820 swfw_sync |= swmask;
4821 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4822 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4823 wm_put_swsm_semaphore(sc);
4824 return 0;
4825 }
4826 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4827 wm_put_swsm_semaphore(sc);
4828 delay(5000);
4829 }
4830 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4831 sc->sc_dev.dv_xname, mask, swfw_sync);
4832 return 1;
4833 }
4834
4835 static void
4836 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4837 {
4838 uint32_t swfw_sync;
4839
4840 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4841 while (wm_get_swsm_semaphore(sc) != 0)
4842 continue;
4843 }
4844 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4845 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4846 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4847 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4848 wm_put_swsm_semaphore(sc);
4849 }
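
/*
 * Illustrative note: SW_FW_SYNC holds a software bit and a firmware bit
 * per shared resource; the SWSM semaphore (when present) only guards
 * the read-modify-write of SW_FW_SYNC itself.  A minimal usage sketch
 * (not compiled), matching how the PHY access routines above use it:
 */
#if 0
	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM))
		return;		/* resource busy; caller backs off */
	/* ... access PHY 0 through MDIC ... */
	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
#endif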
4850
4851 static int
4852 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4853 {
4854 uint32_t ext_ctrl;
4855 int timeout;
4856
4857 for (timeout = 0; timeout < 200; timeout++) {
4858 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4859 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4860 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4861
4862 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4863 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4864 return 0;
4865 delay(5000);
4866 }
4867 printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4868 sc->sc_dev.dv_xname, ext_ctrl);
4869 return 1;
4870 }
4871
4872 static void
4873 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4874 {
4875 uint32_t ext_ctrl;
4876 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4877 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4878 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4879 }
4880
4881 /******************************************************************************
4882 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
4883 * register.
4884 *
4885 * sc - Struct containing variables accessed by shared code
4886 * offset - offset of word in the EEPROM to read
4887 * words - number of words to read
4888 * data - words read from the EEPROM
4889 *****************************************************************************/
4890 static int
4891 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4892 {
4893 int32_t error = 0;
4894 uint32_t flash_bank = 0;
4895 uint32_t act_offset = 0;
4896 uint32_t bank_offset = 0;
4897 uint16_t word = 0;
4898 uint16_t i = 0;
4899
4900 /* We need to know which is the valid flash bank. In the event
4901 * that we didn't allocate eeprom_shadow_ram, we may not be
4902 * managing flash_bank. So it cannot be trusted and needs
4903 * to be updated with each read.
4904 */
4905 /* Value of bit 22 corresponds to the flash bank we're on. */
4906 flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4907
4908 /* Adjust the offset if we're on bank 1; the bank size is in words. */
4909 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4910
4911 error = wm_get_swfwhw_semaphore(sc);
4912 if (error)
4913 return error;
4914
4915 for (i = 0; i < words; i++) {
4916 /* The NVM part needs a byte offset, hence * 2 */
4917 act_offset = bank_offset + ((offset + i) * 2);
4918 error = wm_read_ich8_word(sc, act_offset, &word);
4919 if (error)
4920 break;
4921 data[i] = word;
4922 }
4923
4924 wm_put_swfwhw_semaphore(sc);
4925 return error;
4926 }
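
/*
 * Worked example (illustrative, with a hypothetical bank size): if
 * sc_ich8_flash_bank_size is 0x800 words and bank 1 is valid,
 * bank_offset is 0x800 * 2 == 0x1000 bytes, so reading word 3 uses
 * act_offset 0x1000 + 3 * 2 == 0x1006.
 */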
4927
4928 /******************************************************************************
4929 * This function does initial flash setup so that a new read/write/erase cycle
4930 * can be started.
4931 *
4932 * sc - The pointer to the hw structure
4933 ****************************************************************************/
4934 static int32_t
4935 wm_ich8_cycle_init(struct wm_softc *sc)
4936 {
4937 uint16_t hsfsts;
4938 int32_t error = 1;
4939 int32_t i = 0;
4940
4941 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4942
4943 /* Check the Flash Descriptor Valid bit in the Hw status register. */
4944 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4945 return error;
4946 }
4947
4948 /* Clear FCERR and DAEL in Hw status by writing a 1 to each. */
4950 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4951
4952 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4953
4954 /* Either we should have a hardware SPI cycle-in-progress bit to
4955 * check against in order to start a new cycle, or the FDONE bit
4956 * should be set to 1 by hardware reset so that it can be used to
4957 * indicate whether a cycle is in progress or has completed.  We
4958 * should also have some software semaphore mechanism guarding FDONE
4959 * or the cycle-in-progress bit so that access to those bits by two
4960 * threads is serialized, i.e. so that two threads don't start a
4961 * cycle at the same time. */
4962
4963 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4964 /* There is no cycle running at present, so we can start a cycle */
4965 /* Begin by setting Flash Cycle Done. */
4966 hsfsts |= HSFSTS_DONE;
4967 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4968 error = 0;
4969 } else {
4970 /* Otherwise poll for some time so the current cycle has a chance
4971 * to end before giving up. */
4972 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4973 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4974 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4975 error = 0;
4976 break;
4977 }
4978 delay(1);
4979 }
4980 if (error == 0) {
4981 /* The previous cycle ended in time; now set the
4982 * Flash Cycle Done bit. */
4983 hsfsts |= HSFSTS_DONE;
4984 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4985 }
4986 }
4987 return error;
4988 }
4989
4990 /******************************************************************************
4991 * This function starts a flash cycle and waits for its completion
4992 *
4993 * sc - The pointer to the hw structure
4994 ****************************************************************************/
4995 static int32_t
4996 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
4997 {
4998 uint16_t hsflctl;
4999 uint16_t hsfsts;
5000 int32_t error = 1;
5001 uint32_t i = 0;
5002
5003 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5004 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5005 hsflctl |= HSFCTL_GO;
5006 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5007
5008 /* wait till FDONE bit is set to 1 */
5009 do {
5010 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5011 if (hsfsts & HSFSTS_DONE)
5012 break;
5013 delay(1);
5014 i++;
5015 } while (i < timeout);
5016 if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5017 error = 0;
5018 }
5019 return error;
5020 }
5021
5022 /******************************************************************************
5023 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5024 *
5025 * sc - The pointer to the hw structure
5026 * index - The index of the byte or word to read.
5027 * size - Size of data to read, 1=byte 2=word
5028 * data - Pointer to the word to store the value read.
5029 *****************************************************************************/
5030 static int32_t
5031 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5032 uint32_t size, uint16_t *data)
5033 {
5034 uint16_t hsfsts;
5035 uint16_t hsflctl;
5036 uint32_t flash_linear_address;
5037 uint32_t flash_data = 0;
5038 int32_t error = 1;
5039 int32_t count = 0;
5040
5041 if (size < 1 || size > 2 || data == NULL ||
5042 index > ICH_FLASH_LINEAR_ADDR_MASK)
5043 return error;
5044
5045 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5046 sc->sc_ich8_flash_base;
5047
5048 do {
5049 delay(1);
5050 /* Steps */
5051 error = wm_ich8_cycle_init(sc);
5052 if (error)
5053 break;
5054
5055 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5056 /* A byte count of 0/1 selects a 1- or 2-byte transfer, respectively. */
5057 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5058 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5059 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5060
5061 /* Write the last 24 bits of index into Flash Linear address field in
5062 * Flash Address */
5063 /* TODO: TBD maybe check the index against the size of flash */
5064
5065 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5066
5067 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5068
5069 /* If FCERR is set, clear it and retry the whole sequence a few
5070 * more times; otherwise read the result out of Flash Data0 (the
5071 * data is returned least-significant byte first). */
5072 if (error == 0) {
5073 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5074 if (size == 1) {
5075 *data = (uint8_t)(flash_data & 0x000000FF);
5076 } else if (size == 2) {
5077 *data = (uint16_t)(flash_data & 0x0000FFFF);
5078 }
5079 break;
5080 } else {
5081 /* If we've gotten here, then things are probably completely hosed,
5082 * but if the error condition is detected, it won't hurt to give
5083 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5084 */
5085 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5086 if (hsfsts & HSFSTS_ERR) {
5087 /* Repeat for some time before giving up. */
5088 continue;
5089 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5090 break;
5091 }
5092 }
5093 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5094
5095 return error;
5096 }
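
/*
 * Illustrative note: the loop above retries the whole init/cycle
 * sequence up to ICH_FLASH_CYCLE_REPEAT_COUNT times, but only when the
 * failure was a flash cycle error (FCERR); if the cycle never completed
 * at all, it gives up immediately.
 */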
5097
5098 #if 0
5099 /******************************************************************************
5100 * Reads a single byte from the NVM using the ICH8 flash access registers.
5101 *
5102 * sc - pointer to wm_hw structure
5103 * index - The index of the byte to read.
5104 * data - Pointer to a byte to store the value read.
5105 *****************************************************************************/
5106 static int32_t
5107 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t *data)
5108 {
5109 int32_t status;
5110 uint16_t word = 0;
5111
5112 status = wm_read_ich8_data(sc, index, 1, &word);
5113 if (status == 0) {
5114 *data = (uint8_t)word;
5115 }
5116
5117 return status;
5118 }
5119 #endif
5120
5121 /******************************************************************************
5122 * Reads a word from the NVM using the ICH8 flash access registers.
5123 *
5124 * sc - pointer to wm_hw structure
5125 * index - The starting byte index of the word to read.
5126 * data - Pointer to a word to store the value read.
5127 *****************************************************************************/
5128 static int32_t
5129 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5130 {
5131 int32_t status;
5132
5133 status = wm_read_ich8_data(sc, index, 2, data);
5134 return status;
5135 }
5136