/*	$NetBSD: if_wm.c,v 1.159 2008/08/15 15:45:52 simonb Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.159 2008/08/15 15:45:52 simonb Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
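/*
 * Illustrative example (not part of the driver): because the ring and
 * queue sizes are powers of two, the "next index" macros reduce to a
 * cheap mask instead of a modulus.  Assuming WM_NTXDESC(sc) == 256:
 *
 *	WM_NEXTTX(sc, 0)   == 1
 *	WM_NEXTTX(sc, 255) == 0		(wraps around the ring)
 */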

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
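/*
 * Example (illustrative only): since the Rx descriptors come first in
 * the control-data clump, WM_CDTXOFF(0) is the byte offset just past
 * them, i.e. WM_CDTXOFF(0) == WM_NRXDESC * sizeof(wiseman_rxdesc_t),
 * while WM_CDRXOFF(0) == 0.
 */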

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
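/*
 * The two macros above implement the usual "tail pointer" idiom for
 * appending to an mbuf chain in O(1).  A sketch of their use
 * (illustrative, not actual driver code):
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m1);	// sc_rxhead == m1
 *	WM_RXCHAIN_LINK(sc, m2);	// m1->m_next == m2
 */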

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
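/*
 * CSR_WRITE_FLUSH() forces any posted PCI writes out to the chip by
 * issuing a harmless read of the STATUS register.  A typical use
 * (illustrative sketch, not a line from this driver) is after a write
 * that must reach the hardware before a timed delay:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */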

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
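/*
 * Example (illustrative only): for a 64-bit bus_addr_t of
 * 0x0000000123456780, WM_CDTXADDR_LO() yields 0x23456780 and
 * WM_CDTXADDR_HI() yields 0x00000001; with a 32-bit bus_addr_t the
 * high half is always 0, matching the chip's 64-bit descriptor
 * base-address registers.
 */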

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
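/*
 * Example (illustrative only): with a 256-entry ring,
 * WM_CDTXSYNC(sc, 254, 4, ops) performs two bus_dmamap_sync() calls,
 * one covering descriptors 254-255 and one covering descriptors 0-1,
 * because the requested range wraps past the end of the ring.
 */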

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
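/*
 * Example (illustrative only): with sc_align_tweak == 2, a received
 * frame starts 2 bytes into the cluster, so the 14-byte Ethernet
 * header ends on a 4-byte boundary and the IP header that follows is
 * naturally aligned for strict-alignment CPUs.
 */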

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);

static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);

static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(device_t);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(device_t, struct cfdata *, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
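/*
 * The address/data pair at I/O BAR offsets 0 and 4 provides indirect
 * access to the same register file as the memory BAR: write the
 * register offset first, then read or write the data window.  A
 * sketch of two equivalent accesses (illustrative only):
 *
 *	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);	// same effect via MMIO
 */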

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
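/*
 * Example (illustrative only): wm_set_dma_addr(&d->wrx_addr,
 * 0x0000000123456780ULL) stores wa_low = htole32(0x23456780) and
 * wa_high = htole32(0x00000001), matching the little-endian 64-bit
 * address layout the chip expects in its descriptors.
 */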

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(device_t parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error_dev(&sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error_dev(&sc->sc_dev,
			    "WARNING: unable to find I/O BAR\n");
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(&sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
				    0, &sc->sc_iot, &sc->sc_ioh,
				    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(&sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(&sc->sc_dev, "cannot activate %d\n",
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(&sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(&sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose_dev(&sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error_dev(&sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(&sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(&sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(&sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL, &seg, 1, &rseg, 0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL, 0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}
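
	/*
	 * The four calls above follow the canonical bus_dma(9) sequence
	 * for device-visible memory: bus_dmamem_alloc() obtains pages
	 * below the 4G boundary, bus_dmamem_map() makes them
	 * CPU-visible, bus_dmamap_create() builds a map, and
	 * bus_dmamap_load() yields the bus address the chip will use
	 * (sc_cddma).
	 */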

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create Rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
		    WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			aprint_error_dev(&sc->sc_dev,
			    "can't map FLASH registers\n");
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose_dev(&sc->sc_dev, "No EEPROM\n");
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose_dev(&sc->sc_dev, "FLASH\n");
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose_dev(&sc->sc_dev,
		    "%u word (%d address bits) %s EEPROM\n",
		    1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to read Ethernet address\n");
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
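
	/*
	 * Example (illustrative only): the EEPROM stores the MAC
	 * address as three little-endian 16-bit words, so
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the Ethernet
	 * address 00:11:22:33:44:55.
	 */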

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal_dev(&sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error_dev(&sc->sc_dev, "unable to read CFG1\n");
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error_dev(&sc->sc_dev, "unable to read CFG2\n");
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1,
			    &swdpin)) {
				aprint_error_dev(&sc->sc_dev,
				    "unable to read SWDPIN\n");
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error_dev(&sc->sc_dev,
			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error_dev(&sc->sc_dev,
			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(&sc->sc_dev), wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
	else
		pmf_class_network_register(self, ifp);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_tx_offload:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs, cmd, cmdlen, seg;
	uint32_t ipcse;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
	case ETHERTYPE_IPV6:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if ((m0->m_pkthdr.csum_flags &
	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
	} else {
		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
	}
	ipcse = offset + iphl - 1;
1690
1691 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1692 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1693 seg = 0;
1694 fields = 0;
1695
1696 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1697 int hlen = offset + iphl;
1698 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1699
1700 if (__predict_false(m0->m_len <
1701 (hlen + sizeof(struct tcphdr)))) {
1702 /*
1703 * TCP/IP headers are not in the first mbuf; we need
1704 * to do this the slow and painful way. Let's just
1705 * hope this doesn't happen very often.
1706 */
1707 struct tcphdr th;
1708
1709 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1710
1711 m_copydata(m0, hlen, sizeof(th), &th);
1712 if (v4) {
1713 struct ip ip;
1714
1715 m_copydata(m0, offset, sizeof(ip), &ip);
1716 ip.ip_len = 0;
1717 m_copyback(m0,
1718 offset + offsetof(struct ip, ip_len),
1719 sizeof(ip.ip_len), &ip.ip_len);
1720 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1721 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1722 } else {
1723 struct ip6_hdr ip6;
1724
1725 m_copydata(m0, offset, sizeof(ip6), &ip6);
1726 ip6.ip6_plen = 0;
1727 m_copyback(m0,
1728 offset + offsetof(struct ip6_hdr, ip6_plen),
1729 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1730 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1731 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1732 }
1733 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1734 sizeof(th.th_sum), &th.th_sum);
1735
1736 hlen += th.th_off << 2;
1737 } else {
1738 /*
1739 * TCP/IP headers are in the first mbuf; we can do
1740 * this the easy way.
1741 */
1742 struct tcphdr *th;
1743
1744 if (v4) {
1745 struct ip *ip =
1746 (void *)(mtod(m0, char *) + offset);
1747 th = (void *)(mtod(m0, char *) + hlen);
1748
1749 ip->ip_len = 0;
1750 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1751 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1752 } else {
1753 struct ip6_hdr *ip6 =
1754 (void *)(mtod(m0, char *) + offset);
1755 th = (void *)(mtod(m0, char *) + hlen);
1756
1757 ip6->ip6_plen = 0;
1758 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1759 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1760 }
1761 hlen += th->th_off << 2;
1762 }
1763
1764 if (v4) {
1765 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1766 cmdlen |= WTX_TCPIP_CMD_IP;
1767 } else {
1768 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1769 ipcse = 0;
1770 }
1771 cmd |= WTX_TCPIP_CMD_TSE;
1772 cmdlen |= WTX_TCPIP_CMD_TSE |
1773 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1774 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1775 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1776 }
1777
1778 /*
1779 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1780 * offload feature, if we load the context descriptor, we
1781 * MUST provide valid values for IPCSS and TUCSS fields.
1782 */
1783
1784 ipcs = WTX_TCPIP_IPCSS(offset) |
1785 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1786 WTX_TCPIP_IPCSE(ipcse);
1787 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1788 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1789 fields |= WTX_IXSM;
1790 }
1791
1792 offset += iphl;
1793
1794 if (m0->m_pkthdr.csum_flags &
1795 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1796 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1797 fields |= WTX_TXSM;
1798 tucs = WTX_TCPIP_TUCSS(offset) |
1799 WTX_TCPIP_TUCSO(offset +
1800 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1801 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1802 } else if ((m0->m_pkthdr.csum_flags &
1803 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1804 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1805 fields |= WTX_TXSM;
1806 tucs = WTX_TCPIP_TUCSS(offset) |
1807 WTX_TCPIP_TUCSO(offset +
1808 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1809 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1810 } else {
1811 /* Just initialize it to a valid TCP context. */
1812 tucs = WTX_TCPIP_TUCSS(offset) |
1813 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1814 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1815 }
1816
1817 /* Fill in the context descriptor. */
1818 t = (struct livengood_tcpip_ctxdesc *)
1819 &sc->sc_txdescs[sc->sc_txnext];
1820 t->tcpip_ipcs = htole32(ipcs);
1821 t->tcpip_tucs = htole32(tucs);
1822 t->tcpip_cmdlen = htole32(cmdlen);
1823 t->tcpip_seg = htole32(seg);
1824 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1825
1826 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1827 txs->txs_ndesc++;
1828
1829 *cmdp = cmd;
1830 *fieldsp = fields;
1831
1832 return (0);
1833 }
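
/*
 * Worked example for wm_tx_offload() above (illustrative numbers,
 * derived from the code): for an untagged IPv4/TCP frame with a
 * 20-byte IP header, offset = ETHER_HDR_LEN = 14 and iphl = 20, so
 * the context descriptor carries IPCSS = 14, IPCSO = 14 +
 * offsetof(struct ip, ip_sum) = 24, IPCSE = 14 + 20 - 1 = 33,
 * TUCSS = 34, and TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.
 */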
1834
1835 static void
1836 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1837 {
1838 struct mbuf *m;
1839 int i;
1840
1841 log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(&sc->sc_dev));
1842 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1843 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1844 "m_flags = 0x%08x\n", device_xname(&sc->sc_dev),
1845 m->m_data, m->m_len, m->m_flags);
1846 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(&sc->sc_dev),
1847 i, i == 1 ? "" : "s");
1848 }
1849
1850 /*
1851 * wm_82547_txfifo_stall:
1852 *
1853 * Callout used to wait for the 82547 Tx FIFO to drain,
1854 * reset the FIFO pointers, and restart packet transmission.
1855 */
1856 static void
1857 wm_82547_txfifo_stall(void *arg)
1858 {
1859 struct wm_softc *sc = arg;
1860 int s;
1861
1862 s = splnet();
1863
1864 if (sc->sc_txfifo_stall) {
1865 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1866 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1867 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1868 /*
1869 * Packets have drained. Stop transmitter, reset
1870 * FIFO pointers, restart transmitter, and kick
1871 * the packet queue.
1872 */
1873 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1874 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1875 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1876 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1877 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1878 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1879 CSR_WRITE(sc, WMREG_TCTL, tctl);
1880 CSR_WRITE_FLUSH(sc);
1881
1882 sc->sc_txfifo_head = 0;
1883 sc->sc_txfifo_stall = 0;
1884 wm_start(&sc->sc_ethercom.ec_if);
1885 } else {
1886 /*
1887 * Still waiting for packets to drain; try again in
1888 * another tick.
1889 */
1890 callout_schedule(&sc->sc_txfifo_ch, 1);
1891 }
1892 }
1893
1894 splx(s);
1895 }
1896
1897 /*
1898 * wm_82547_txfifo_bugchk:
1899 *
1900 * Check for bug condition in the 82547 Tx FIFO. We need to
1901 * prevent enqueueing a packet that would wrap around the end
1902  * of the Tx FIFO ring buffer; otherwise the chip will croak.
1903 *
1904 * We do this by checking the amount of space before the end
1905 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1906 * the Tx FIFO, wait for all remaining packets to drain, reset
1907 * the internal FIFO pointers to the beginning, and restart
1908 * transmission on the interface.
1909 */
1910 #define WM_FIFO_HDR 0x10
1911 #define WM_82547_PAD_LEN 0x3e0
1912 static int
1913 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1914 {
1915 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1916 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1917
1918 /* Just return if already stalled. */
1919 if (sc->sc_txfifo_stall)
1920 return (1);
1921
1922 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1923 /* Stall only occurs in half-duplex mode. */
1924 goto send_packet;
1925 }
1926
1927 if (len >= WM_82547_PAD_LEN + space) {
1928 sc->sc_txfifo_stall = 1;
1929 callout_schedule(&sc->sc_txfifo_ch, 1);
1930 return (1);
1931 }
1932
1933 send_packet:
1934 sc->sc_txfifo_head += len;
1935 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1936 sc->sc_txfifo_head -= sc->sc_txfifo_size;
1937
1938 return (0);
1939 }
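
/*
 * Example of the accounting above (illustrative numbers): WM_FIFO_HDR
 * is 0x10, so a 1514-byte frame occupies roundup(1514 + 16, 16) = 1536
 * bytes of FIFO space, and triggers a stall only once no more than
 * 1536 - WM_82547_PAD_LEN = 544 bytes remain before the FIFO's end.
 */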
1940
1941 /*
1942 * wm_start: [ifnet interface function]
1943 *
1944 * Start packet transmission on the interface.
1945 */
1946 static void
1947 wm_start(struct ifnet *ifp)
1948 {
1949 struct wm_softc *sc = ifp->if_softc;
1950 struct mbuf *m0;
1951 #if 0 /* XXXJRT */
1952 struct m_tag *mtag;
1953 #endif
1954 struct wm_txsoft *txs;
1955 bus_dmamap_t dmamap;
1956 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
1957 bus_addr_t curaddr;
1958 bus_size_t seglen, curlen;
1959 uint32_t cksumcmd;
1960 uint8_t cksumfields;
1961
1962 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1963 return;
1964
1965 /*
1966 * Remember the previous number of free descriptors.
1967 */
1968 ofree = sc->sc_txfree;
1969
1970 /*
1971 * Loop through the send queue, setting up transmit descriptors
1972 * until we drain the queue, or use up all available transmit
1973 * descriptors.
1974 */
1975 for (;;) {
1976 /* Grab a packet off the queue. */
1977 IFQ_POLL(&ifp->if_snd, m0);
1978 if (m0 == NULL)
1979 break;
1980
1981 DPRINTF(WM_DEBUG_TX,
1982 ("%s: TX: have packet to transmit: %p\n",
1983 device_xname(&sc->sc_dev), m0));
1984
1985 /* Get a work queue entry. */
1986 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
1987 wm_txintr(sc);
1988 if (sc->sc_txsfree == 0) {
1989 DPRINTF(WM_DEBUG_TX,
1990 ("%s: TX: no free job descriptors\n",
1991 device_xname(&sc->sc_dev)));
1992 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1993 break;
1994 }
1995 }
1996
1997 txs = &sc->sc_txsoft[sc->sc_txsnext];
1998 dmamap = txs->txs_dmamap;
1999
2000 use_tso = (m0->m_pkthdr.csum_flags &
2001 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2002
2003 /*
2004 * So says the Linux driver:
2005 * The controller does a simple calculation to make sure
2006 * there is enough room in the FIFO before initiating the
2007 * DMA for each buffer. The calc is:
2008 * 4 = ceil(buffer len / MSS)
2009 * To make sure we don't overrun the FIFO, adjust the max
2010 * buffer len if the MSS drops.
2011 */
2012 dmamap->dm_maxsegsz =
2013 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2014 ? m0->m_pkthdr.segsz << 2
2015 : WTX_MAX_LEN;
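
		/*
		 * Illustrative numbers for the clamp above: with an MSS
		 * (segsz) of 512, each DMA segment is capped at 4 * 512 =
		 * 2048 bytes, so ceil(segment length / MSS) never exceeds
		 * the 4 buffers the controller's FIFO check assumes.
		 */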
2016
2017 /*
2018 * Load the DMA map. If this fails, the packet either
2019 * didn't fit in the allotted number of segments, or we
2020 * were short on resources. For the too-many-segments
2021 * case, we simply report an error and drop the packet,
2022 * since we can't sanely copy a jumbo packet to a single
2023 * buffer.
2024 */
2025 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2026 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2027 if (error) {
2028 if (error == EFBIG) {
2029 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2030 log(LOG_ERR, "%s: Tx packet consumes too many "
2031 "DMA segments, dropping...\n",
2032 device_xname(&sc->sc_dev));
2033 IFQ_DEQUEUE(&ifp->if_snd, m0);
2034 wm_dump_mbuf_chain(sc, m0);
2035 m_freem(m0);
2036 continue;
2037 }
2038 /*
2039 * Short on resources, just stop for now.
2040 */
2041 DPRINTF(WM_DEBUG_TX,
2042 ("%s: TX: dmamap load failed: %d\n",
2043 device_xname(&sc->sc_dev), error));
2044 break;
2045 }
2046
2047 segs_needed = dmamap->dm_nsegs;
2048 if (use_tso) {
2049 /* For sentinel descriptor; see below. */
2050 segs_needed++;
2051 }
2052
2053 /*
2054 * Ensure we have enough descriptors free to describe
2055 * the packet. Note, we always reserve one descriptor
2056 * at the end of the ring due to the semantics of the
2057 * TDT register, plus one more in the event we need
2058 * to load offload context.
2059 */
2060 if (segs_needed > sc->sc_txfree - 2) {
2061 /*
2062 * Not enough free descriptors to transmit this
2063 * packet. We haven't committed anything yet,
2064 * so just unload the DMA map, put the packet
2065 			 * back on the queue, and punt. Notify the upper
2066 * layer that there are no more slots left.
2067 */
2068 DPRINTF(WM_DEBUG_TX,
2069 ("%s: TX: need %d (%d) descriptors, have %d\n",
2070 device_xname(&sc->sc_dev), dmamap->dm_nsegs, segs_needed,
2071 sc->sc_txfree - 1));
2072 ifp->if_flags |= IFF_OACTIVE;
2073 bus_dmamap_unload(sc->sc_dmat, dmamap);
2074 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2075 break;
2076 }
2077
2078 /*
2079 * Check for 82547 Tx FIFO bug. We need to do this
2080 * once we know we can transmit the packet, since we
2081 * do some internal FIFO space accounting here.
2082 */
2083 if (sc->sc_type == WM_T_82547 &&
2084 wm_82547_txfifo_bugchk(sc, m0)) {
2085 DPRINTF(WM_DEBUG_TX,
2086 ("%s: TX: 82547 Tx FIFO bug detected\n",
2087 device_xname(&sc->sc_dev)));
2088 ifp->if_flags |= IFF_OACTIVE;
2089 bus_dmamap_unload(sc->sc_dmat, dmamap);
2090 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2091 break;
2092 }
2093
2094 IFQ_DEQUEUE(&ifp->if_snd, m0);
2095
2096 /*
2097 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2098 */
2099
2100 DPRINTF(WM_DEBUG_TX,
2101 ("%s: TX: packet has %d (%d) DMA segments\n",
2102 device_xname(&sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2103
2104 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2105
2106 /*
2107 * Store a pointer to the packet so that we can free it
2108 * later.
2109 *
2110 * Initially, we consider the number of descriptors the
2111 		 * packet uses to be the number of DMA segments. This may be
2112 * incremented by 1 if we do checksum offload (a descriptor
2113 * is used to set the checksum context).
2114 */
2115 txs->txs_mbuf = m0;
2116 txs->txs_firstdesc = sc->sc_txnext;
2117 txs->txs_ndesc = segs_needed;
2118
2119 /* Set up offload parameters for this packet. */
2120 if (m0->m_pkthdr.csum_flags &
2121 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2122 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2123 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2124 if (wm_tx_offload(sc, txs, &cksumcmd,
2125 &cksumfields) != 0) {
2126 /* Error message already displayed. */
2127 bus_dmamap_unload(sc->sc_dmat, dmamap);
2128 continue;
2129 }
2130 } else {
2131 cksumcmd = 0;
2132 cksumfields = 0;
2133 }
2134
2135 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2136
2137 /* Sync the DMA map. */
2138 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2139 BUS_DMASYNC_PREWRITE);
2140
2141 /*
2142 * Initialize the transmit descriptor.
2143 */
2144 for (nexttx = sc->sc_txnext, seg = 0;
2145 seg < dmamap->dm_nsegs; seg++) {
2146 for (seglen = dmamap->dm_segs[seg].ds_len,
2147 curaddr = dmamap->dm_segs[seg].ds_addr;
2148 seglen != 0;
2149 curaddr += curlen, seglen -= curlen,
2150 nexttx = WM_NEXTTX(sc, nexttx)) {
2151 curlen = seglen;
2152
2153 /*
2154 * So says the Linux driver:
2155 * Work around for premature descriptor
2156 * write-backs in TSO mode. Append a
2157 * 4-byte sentinel descriptor.
2158 */
2159 if (use_tso &&
2160 seg == dmamap->dm_nsegs - 1 &&
2161 curlen > 8)
2162 curlen -= 4;
2163
2164 wm_set_dma_addr(
2165 &sc->sc_txdescs[nexttx].wtx_addr,
2166 curaddr);
2167 sc->sc_txdescs[nexttx].wtx_cmdlen =
2168 htole32(cksumcmd | curlen);
2169 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2170 0;
2171 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2172 cksumfields;
2173 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2174 lasttx = nexttx;
2175
2176 DPRINTF(WM_DEBUG_TX,
2177 ("%s: TX: desc %d: low 0x%08lx, "
2178 "len 0x%04x\n",
2179 device_xname(&sc->sc_dev), nexttx,
2180 curaddr & 0xffffffffUL, (unsigned)curlen));
2181 }
2182 }
2183
2184 KASSERT(lasttx != -1);
2185
2186 /*
2187 * Set up the command byte on the last descriptor of
2188 * the packet. If we're in the interrupt delay window,
2189 * delay the interrupt.
2190 */
2191 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2192 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2193
2194 #if 0 /* XXXJRT */
2195 /*
2196 * If VLANs are enabled and the packet has a VLAN tag, set
2197 * up the descriptor to encapsulate the packet for us.
2198 *
2199 * This is only valid on the last descriptor of the packet.
2200 */
2201 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2202 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2203 htole32(WTX_CMD_VLE);
2204 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2205 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2206 }
2207 #endif /* XXXJRT */
2208
2209 txs->txs_lastdesc = lasttx;
2210
2211 DPRINTF(WM_DEBUG_TX,
2212 ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(&sc->sc_dev),
2213 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2214
2215 /* Sync the descriptors we're using. */
2216 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2217 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2218
2219 /* Give the packet to the chip. */
2220 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2221
2222 DPRINTF(WM_DEBUG_TX,
2223 ("%s: TX: TDT -> %d\n", device_xname(&sc->sc_dev), nexttx));
2224
2225 DPRINTF(WM_DEBUG_TX,
2226 ("%s: TX: finished transmitting packet, job %d\n",
2227 device_xname(&sc->sc_dev), sc->sc_txsnext));
2228
2229 /* Advance the tx pointer. */
2230 sc->sc_txfree -= txs->txs_ndesc;
2231 sc->sc_txnext = nexttx;
2232
2233 sc->sc_txsfree--;
2234 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2235
2236 #if NBPFILTER > 0
2237 /* Pass the packet to any BPF listeners. */
2238 if (ifp->if_bpf)
2239 bpf_mtap(ifp->if_bpf, m0);
2240 #endif /* NBPFILTER > 0 */
2241 }
2242
2243 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2244 /* No more slots; notify upper layer. */
2245 ifp->if_flags |= IFF_OACTIVE;
2246 }
2247
2248 if (sc->sc_txfree != ofree) {
2249 /* Set a watchdog timer in case the chip flakes out. */
2250 ifp->if_timer = 5;
2251 }
2252 }
2253
2254 /*
2255 * wm_watchdog: [ifnet interface function]
2256 *
2257 * Watchdog timer handler.
2258 */
2259 static void
2260 wm_watchdog(struct ifnet *ifp)
2261 {
2262 struct wm_softc *sc = ifp->if_softc;
2263
2264 /*
2265 * Since we're using delayed interrupts, sweep up
2266 * before we report an error.
2267 */
2268 wm_txintr(sc);
2269
2270 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2271 log(LOG_ERR,
2272 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2273 device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
2274 sc->sc_txnext);
2275 ifp->if_oerrors++;
2276
2277 /* Reset the interface. */
2278 (void) wm_init(ifp);
2279 }
2280
2281 /* Try to get more packets going. */
2282 wm_start(ifp);
2283 }
2284
2285 /*
2286 * wm_ioctl: [ifnet interface function]
2287 *
2288 * Handle control requests from the operator.
2289 */
2290 static int
2291 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2292 {
2293 struct wm_softc *sc = ifp->if_softc;
2294 struct ifreq *ifr = (struct ifreq *) data;
2295 int s, error;
2296
2297 s = splnet();
2298
2299 switch (cmd) {
2300 case SIOCSIFMEDIA:
2301 case SIOCGIFMEDIA:
2302 /* Flow control requires full-duplex mode. */
2303 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2304 (ifr->ifr_media & IFM_FDX) == 0)
2305 ifr->ifr_media &= ~IFM_ETH_FMASK;
2306 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2307 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2308 /* We can do both TXPAUSE and RXPAUSE. */
2309 ifr->ifr_media |=
2310 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2311 }
2312 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2313 }
2314 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2315 break;
2316 default:
2317 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
2318 break;
2319
2320 error = 0;
2321
2322 if (cmd == SIOCSIFCAP)
2323 error = (*ifp->if_init)(ifp);
2324 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2325 ;
2326 else if (ifp->if_flags & IFF_RUNNING) {
2327 /*
2328 * Multicast list has changed; set the hardware filter
2329 * accordingly.
2330 */
2331 wm_set_filter(sc);
2332 }
2333 break;
2334 }
2335
2336 /* Try to get more packets going. */
2337 wm_start(ifp);
2338
2339 splx(s);
2340 return (error);
2341 }
2342
2343 /*
2344 * wm_intr:
2345 *
2346 * Interrupt service routine.
2347 */
2348 static int
2349 wm_intr(void *arg)
2350 {
2351 struct wm_softc *sc = arg;
2352 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2353 uint32_t icr;
2354 int handled = 0;
2355
2356 while (1 /* CONSTCOND */) {
2357 icr = CSR_READ(sc, WMREG_ICR);
2358 if ((icr & sc->sc_icr) == 0)
2359 break;
2360 #if 0 /*NRND > 0*/
2361 if (RND_ENABLED(&sc->rnd_source))
2362 rnd_add_uint32(&sc->rnd_source, icr);
2363 #endif
2364
2365 handled = 1;
2366
2367 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2368 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2369 DPRINTF(WM_DEBUG_RX,
2370 ("%s: RX: got Rx intr 0x%08x\n",
2371 device_xname(&sc->sc_dev),
2372 icr & (ICR_RXDMT0|ICR_RXT0)));
2373 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2374 }
2375 #endif
2376 wm_rxintr(sc);
2377
2378 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2379 if (icr & ICR_TXDW) {
2380 DPRINTF(WM_DEBUG_TX,
2381 ("%s: TX: got TXDW interrupt\n",
2382 device_xname(&sc->sc_dev)));
2383 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2384 }
2385 #endif
2386 wm_txintr(sc);
2387
2388 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2389 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2390 wm_linkintr(sc, icr);
2391 }
2392
2393 if (icr & ICR_RXO) {
2394 ifp->if_ierrors++;
2395 #if defined(WM_DEBUG)
2396 log(LOG_WARNING, "%s: Receive overrun\n",
2397 device_xname(&sc->sc_dev));
2398 #endif /* defined(WM_DEBUG) */
2399 }
2400 }
2401
2402 if (handled) {
2403 /* Try to get more packets going. */
2404 wm_start(ifp);
2405 }
2406
2407 return (handled);
2408 }
2409
2410 /*
2411 * wm_txintr:
2412 *
2413 * Helper; handle transmit interrupts.
2414 */
2415 static void
2416 wm_txintr(struct wm_softc *sc)
2417 {
2418 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2419 struct wm_txsoft *txs;
2420 uint8_t status;
2421 int i;
2422
2423 ifp->if_flags &= ~IFF_OACTIVE;
2424
2425 /*
2426 * Go through the Tx list and free mbufs for those
2427 * frames which have been transmitted.
2428 */
2429 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2430 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2431 txs = &sc->sc_txsoft[i];
2432
2433 DPRINTF(WM_DEBUG_TX,
2434 ("%s: TX: checking job %d\n", device_xname(&sc->sc_dev), i));
2435
2436 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2437 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2438
2439 status =
2440 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2441 if ((status & WTX_ST_DD) == 0) {
2442 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2443 BUS_DMASYNC_PREREAD);
2444 break;
2445 }
2446
2447 DPRINTF(WM_DEBUG_TX,
2448 ("%s: TX: job %d done: descs %d..%d\n",
2449 device_xname(&sc->sc_dev), i, txs->txs_firstdesc,
2450 txs->txs_lastdesc));
2451
2452 /*
2453 * XXX We should probably be using the statistics
2454 * XXX registers, but I don't know if they exist
2455 * XXX on chips before the i82544.
2456 */
2457
2458 #ifdef WM_EVENT_COUNTERS
2459 if (status & WTX_ST_TU)
2460 WM_EVCNT_INCR(&sc->sc_ev_tu);
2461 #endif /* WM_EVENT_COUNTERS */
2462
2463 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2464 ifp->if_oerrors++;
2465 if (status & WTX_ST_LC)
2466 log(LOG_WARNING, "%s: late collision\n",
2467 device_xname(&sc->sc_dev));
2468 else if (status & WTX_ST_EC) {
2469 ifp->if_collisions += 16;
2470 log(LOG_WARNING, "%s: excessive collisions\n",
2471 device_xname(&sc->sc_dev));
2472 }
2473 } else
2474 ifp->if_opackets++;
2475
2476 sc->sc_txfree += txs->txs_ndesc;
2477 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2478 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2479 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2480 m_freem(txs->txs_mbuf);
2481 txs->txs_mbuf = NULL;
2482 }
2483
2484 /* Update the dirty transmit buffer pointer. */
2485 sc->sc_txsdirty = i;
2486 DPRINTF(WM_DEBUG_TX,
2487 ("%s: TX: txsdirty -> %d\n", device_xname(&sc->sc_dev), i));
2488
2489 /*
2490 * If there are no more pending transmissions, cancel the watchdog
2491 * timer.
2492 */
2493 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2494 ifp->if_timer = 0;
2495 }
2496
2497 /*
2498 * wm_rxintr:
2499 *
2500 * Helper; handle receive interrupts.
2501 */
2502 static void
2503 wm_rxintr(struct wm_softc *sc)
2504 {
2505 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2506 struct wm_rxsoft *rxs;
2507 struct mbuf *m;
2508 int i, len;
2509 uint8_t status, errors;
2510
2511 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2512 rxs = &sc->sc_rxsoft[i];
2513
2514 DPRINTF(WM_DEBUG_RX,
2515 ("%s: RX: checking descriptor %d\n",
2516 device_xname(&sc->sc_dev), i));
2517
2518 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2519
2520 status = sc->sc_rxdescs[i].wrx_status;
2521 errors = sc->sc_rxdescs[i].wrx_errors;
2522 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2523
2524 if ((status & WRX_ST_DD) == 0) {
2525 /*
2526 * We have processed all of the receive descriptors.
2527 */
2528 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2529 break;
2530 }
2531
2532 if (__predict_false(sc->sc_rxdiscard)) {
2533 DPRINTF(WM_DEBUG_RX,
2534 ("%s: RX: discarding contents of descriptor %d\n",
2535 device_xname(&sc->sc_dev), i));
2536 WM_INIT_RXDESC(sc, i);
2537 if (status & WRX_ST_EOP) {
2538 /* Reset our state. */
2539 DPRINTF(WM_DEBUG_RX,
2540 ("%s: RX: resetting rxdiscard -> 0\n",
2541 device_xname(&sc->sc_dev)));
2542 sc->sc_rxdiscard = 0;
2543 }
2544 continue;
2545 }
2546
2547 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2548 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2549
2550 m = rxs->rxs_mbuf;
2551
2552 /*
2553 * Add a new receive buffer to the ring, unless of
2554 * course the length is zero. Treat the latter as a
2555 * failed mapping.
2556 */
2557 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2558 /*
2559 * Failed, throw away what we've done so
2560 * far, and discard the rest of the packet.
2561 */
2562 ifp->if_ierrors++;
2563 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2564 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2565 WM_INIT_RXDESC(sc, i);
2566 if ((status & WRX_ST_EOP) == 0)
2567 sc->sc_rxdiscard = 1;
2568 if (sc->sc_rxhead != NULL)
2569 m_freem(sc->sc_rxhead);
2570 WM_RXCHAIN_RESET(sc);
2571 DPRINTF(WM_DEBUG_RX,
2572 ("%s: RX: Rx buffer allocation failed, "
2573 "dropping packet%s\n", device_xname(&sc->sc_dev),
2574 sc->sc_rxdiscard ? " (discard)" : ""));
2575 continue;
2576 }
2577
2578 m->m_len = len;
2579 sc->sc_rxlen += len;
2580 DPRINTF(WM_DEBUG_RX,
2581 ("%s: RX: buffer at %p len %d\n",
2582 device_xname(&sc->sc_dev), m->m_data, len));
2583
2584 /*
2585 * If this is not the end of the packet, keep
2586 * looking.
2587 */
2588 if ((status & WRX_ST_EOP) == 0) {
2589 WM_RXCHAIN_LINK(sc, m);
2590 DPRINTF(WM_DEBUG_RX,
2591 ("%s: RX: not yet EOP, rxlen -> %d\n",
2592 device_xname(&sc->sc_dev), sc->sc_rxlen));
2593 continue;
2594 }
2595
2596 /*
2597 * Okay, we have the entire packet now. The chip is
2598 * configured to include the FCS (not all chips can
2599 * be configured to strip it), so we need to trim it.
2600 		 * We may need to trim the previous mbuf in the chain when
2601 		 * the current mbuf holds fewer than the 4 FCS bytes.
2602 */
2603 if (m->m_len < ETHER_CRC_LEN) {
2604 sc->sc_rxtail->m_len -= (ETHER_CRC_LEN - m->m_len);
2605 m->m_len = 0;
2606 } else {
2607 m->m_len -= ETHER_CRC_LEN;
2608 }
2609 len = sc->sc_rxlen - ETHER_CRC_LEN;
2610
2611 WM_RXCHAIN_LINK(sc, m);
2612
2613 *sc->sc_rxtailp = NULL;
2614 m = sc->sc_rxhead;
2615
2616 WM_RXCHAIN_RESET(sc);
2617
2618 DPRINTF(WM_DEBUG_RX,
2619 ("%s: RX: have entire packet, len -> %d\n",
2620 device_xname(&sc->sc_dev), len));
2621
2622 /*
2623 * If an error occurred, update stats and drop the packet.
2624 */
2625 if (errors &
2626 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2627 ifp->if_ierrors++;
2628 if (errors & WRX_ER_SE)
2629 log(LOG_WARNING, "%s: symbol error\n",
2630 device_xname(&sc->sc_dev));
2631 else if (errors & WRX_ER_SEQ)
2632 log(LOG_WARNING, "%s: receive sequence error\n",
2633 device_xname(&sc->sc_dev));
2634 else if (errors & WRX_ER_CE)
2635 log(LOG_WARNING, "%s: CRC error\n",
2636 device_xname(&sc->sc_dev));
2637 m_freem(m);
2638 continue;
2639 }
2640
2641 /*
2642 * No errors. Receive the packet.
2643 */
2644 m->m_pkthdr.rcvif = ifp;
2645 m->m_pkthdr.len = len;
2646
2647 #if 0 /* XXXJRT */
2648 /*
2649 * If VLANs are enabled, VLAN packets have been unwrapped
2650 * for us. Associate the tag with the packet.
2651 */
2652 if ((status & WRX_ST_VP) != 0) {
2653 VLAN_INPUT_TAG(ifp, m,
2654 			    le16toh(sc->sc_rxdescs[i].wrx_special),
2655 continue);
2656 }
2657 #endif /* XXXJRT */
2658
2659 /*
2660 * Set up checksum info for this packet.
2661 */
2662 if ((status & WRX_ST_IXSM) == 0) {
2663 if (status & WRX_ST_IPCS) {
2664 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2665 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2666 if (errors & WRX_ER_IPE)
2667 m->m_pkthdr.csum_flags |=
2668 M_CSUM_IPv4_BAD;
2669 }
2670 if (status & WRX_ST_TCPCS) {
2671 /*
2672 * Note: we don't know if this was TCP or UDP,
2673 * so we just set both bits, and expect the
2674 * upper layers to deal.
2675 */
2676 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2677 m->m_pkthdr.csum_flags |=
2678 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2679 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2680 if (errors & WRX_ER_TCPE)
2681 m->m_pkthdr.csum_flags |=
2682 M_CSUM_TCP_UDP_BAD;
2683 }
2684 }
2685
2686 ifp->if_ipackets++;
2687
2688 #if NBPFILTER > 0
2689 /* Pass this up to any BPF listeners. */
2690 if (ifp->if_bpf)
2691 bpf_mtap(ifp->if_bpf, m);
2692 #endif /* NBPFILTER > 0 */
2693
2694 /* Pass it on. */
2695 (*ifp->if_input)(ifp, m);
2696 }
2697
2698 /* Update the receive pointer. */
2699 sc->sc_rxptr = i;
2700
2701 DPRINTF(WM_DEBUG_RX,
2702 ("%s: RX: rxptr -> %d\n", device_xname(&sc->sc_dev), i));
2703 }
2704
2705 /*
2706 * wm_linkintr:
2707 *
2708 * Helper; handle link interrupts.
2709 */
2710 static void
2711 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2712 {
2713 uint32_t status;
2714
2715 /*
2716 * If we get a link status interrupt on a 1000BASE-T
2717 * device, just fall into the normal MII tick path.
2718 */
2719 if (sc->sc_flags & WM_F_HAS_MII) {
2720 if (icr & ICR_LSC) {
2721 DPRINTF(WM_DEBUG_LINK,
2722 ("%s: LINK: LSC -> mii_tick\n",
2723 device_xname(&sc->sc_dev)));
2724 mii_tick(&sc->sc_mii);
2725 } else if (icr & ICR_RXSEQ) {
2726 DPRINTF(WM_DEBUG_LINK,
2727 ("%s: LINK Receive sequence error\n",
2728 device_xname(&sc->sc_dev)));
2729 }
2730 return;
2731 }
2732
2733 /*
2734 * If we are now receiving /C/, check for link again in
2735 * a couple of link clock ticks.
2736 */
2737 if (icr & ICR_RXCFG) {
2738 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2739 device_xname(&sc->sc_dev)));
2740 sc->sc_tbi_anstate = 2;
2741 }
2742
2743 if (icr & ICR_LSC) {
2744 status = CSR_READ(sc, WMREG_STATUS);
2745 if (status & STATUS_LU) {
2746 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2747 device_xname(&sc->sc_dev),
2748 (status & STATUS_FD) ? "FDX" : "HDX"));
2749 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2750 sc->sc_fcrtl &= ~FCRTL_XONE;
2751 if (status & STATUS_FD)
2752 sc->sc_tctl |=
2753 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2754 else
2755 sc->sc_tctl |=
2756 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2757 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2758 sc->sc_fcrtl |= FCRTL_XONE;
2759 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2760 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2761 WMREG_OLD_FCRTL : WMREG_FCRTL,
2762 sc->sc_fcrtl);
2763 sc->sc_tbi_linkup = 1;
2764 } else {
2765 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2766 device_xname(&sc->sc_dev)));
2767 sc->sc_tbi_linkup = 0;
2768 }
2769 sc->sc_tbi_anstate = 2;
2770 wm_tbi_set_linkled(sc);
2771 } else if (icr & ICR_RXSEQ) {
2772 DPRINTF(WM_DEBUG_LINK,
2773 ("%s: LINK: Receive sequence error\n",
2774 device_xname(&sc->sc_dev)));
2775 }
2776 }
2777
2778 /*
2779 * wm_tick:
2780 *
2781 * One second timer, used to check link status, sweep up
2782 * completed transmit jobs, etc.
2783 */
2784 static void
2785 wm_tick(void *arg)
2786 {
2787 struct wm_softc *sc = arg;
2788 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2789 int s;
2790
2791 s = splnet();
2792
2793 if (sc->sc_type >= WM_T_82542_2_1) {
2794 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2795 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2796 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2797 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2798 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2799 }
2800
2801 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2802 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2803
2804
2805 if (sc->sc_flags & WM_F_HAS_MII)
2806 mii_tick(&sc->sc_mii);
2807 else
2808 wm_tbi_check_link(sc);
2809
2810 splx(s);
2811
2812 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2813 }
2814
2815 /*
2816 * wm_reset:
2817 *
2818 * Reset the i82542 chip.
2819 */
2820 static void
2821 wm_reset(struct wm_softc *sc)
2822 {
2823 uint32_t reg;
2824
2825 /*
2826 * Allocate on-chip memory according to the MTU size.
2827 * The Packet Buffer Allocation register must be written
2828 * before the chip is reset.
2829 */
2830 switch (sc->sc_type) {
2831 case WM_T_82547:
2832 case WM_T_82547_2:
2833 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2834 PBA_22K : PBA_30K;
2835 sc->sc_txfifo_head = 0;
2836 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2837 sc->sc_txfifo_size =
2838 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2839 sc->sc_txfifo_stall = 0;
2840 break;
2841 case WM_T_82571:
2842 case WM_T_82572:
2843 case WM_T_80003:
2844 sc->sc_pba = PBA_32K;
2845 break;
2846 case WM_T_82573:
2847 sc->sc_pba = PBA_12K;
2848 break;
2849 case WM_T_ICH8:
2850 sc->sc_pba = PBA_8K;
2851 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2852 break;
2853 case WM_T_ICH9:
2854 sc->sc_pba = PBA_10K;
2855 break;
2856 default:
2857 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2858 PBA_40K : PBA_48K;
2859 break;
2860 }
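
	/*
	 * Note: sc_pba is the receive share of the on-chip packet
	 * buffer; the remainder is left for transmit, as the 82547
	 * Tx FIFO arithmetic above illustrates.
	 */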
2861 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2862
2863 if (sc->sc_flags & WM_F_PCIE) {
2864 int timeout = 800;
2865
2866 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2867 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2868
2869 		while (timeout--) {
2870 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2871 break;
2872 delay(100);
2873 }
2874 }
2875
2876 	/* Clear the interrupt mask to disable all interrupts. */
2877 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2878
2879 /*
2880 * 82541 Errata 29? & 82547 Errata 28?
2881 * See also the description about PHY_RST bit in CTRL register
2882 * in 8254x_GBe_SDM.pdf.
2883 */
2884 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2885 CSR_WRITE(sc, WMREG_CTRL,
2886 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2887 delay(5000);
2888 }
2889
2890 switch (sc->sc_type) {
2891 case WM_T_82544:
2892 case WM_T_82540:
2893 case WM_T_82545:
2894 case WM_T_82546:
2895 case WM_T_82541:
2896 case WM_T_82541_2:
2897 /*
2898 * On some chipsets, a reset through a memory-mapped write
2899 * cycle can cause the chip to reset before completing the
2900 	 * write cycle. This causes a major headache that can be
2901 * avoided by issuing the reset via indirect register writes
2902 * through I/O space.
2903 *
2904 * So, if we successfully mapped the I/O BAR at attach time,
2905 * use that. Otherwise, try our luck with a memory-mapped
2906 * reset.
2907 */
2908 if (sc->sc_flags & WM_F_IOH_VALID)
2909 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2910 else
2911 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2912 break;
2913
2914 case WM_T_82545_3:
2915 case WM_T_82546_3:
2916 /* Use the shadow control register on these chips. */
2917 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2918 break;
2919
2920 case WM_T_ICH8:
2921 case WM_T_ICH9:
2922 wm_get_swfwhw_semaphore(sc);
2923 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2924 		delay(10000);
2925 		break;
2926 default:
2927 /* Everything else can safely use the documented method. */
2928 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2929 break;
2930 }
2931 delay(10000);
2932
2933 /* reload EEPROM */
2934 	switch (sc->sc_type) {
2935 case WM_T_82542_2_0:
2936 case WM_T_82542_2_1:
2937 case WM_T_82543:
2938 case WM_T_82544:
2939 delay(10);
2940 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2941 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2942 delay(2000);
2943 break;
2944 case WM_T_82541:
2945 case WM_T_82541_2:
2946 case WM_T_82547:
2947 case WM_T_82547_2:
2948 delay(20000);
2949 break;
2950 case WM_T_82573:
2951 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
2952 delay(10);
2953 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2954 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2955 }
2956 /* FALLTHROUGH */
2957 default:
2958 /* check EECD_EE_AUTORD */
2959 wm_get_auto_rd_done(sc);
2960 }
2961
2962 #if 0
2963 for (i = 0; i < 1000; i++) {
2964 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
2965 return;
2966 }
2967 delay(20);
2968 }
2969
2970 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2971 log(LOG_ERR, "%s: reset failed to complete\n",
2972 device_xname(&sc->sc_dev));
2973 #endif
2974 }
2975
2976 /*
2977 * wm_init: [ifnet interface function]
2978 *
2979 * Initialize the interface. Must be called at splnet().
2980 */
2981 static int
2982 wm_init(struct ifnet *ifp)
2983 {
2984 struct wm_softc *sc = ifp->if_softc;
2985 struct wm_rxsoft *rxs;
2986 int i, error = 0;
2987 uint32_t reg;
2988
2989 /*
2990 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2991 	 * There is a small but measurable benefit to avoiding the adjustment
2992 * of the descriptor so that the headers are aligned, for normal mtu,
2993 * on such platforms. One possibility is that the DMA itself is
2994 * slightly more efficient if the front of the entire packet (instead
2995 * of the front of the headers) is aligned.
2996 *
2997 * Note we must always set align_tweak to 0 if we are using
2998 * jumbo frames.
2999 */
3000 #ifdef __NO_STRICT_ALIGNMENT
3001 sc->sc_align_tweak = 0;
3002 #else
3003 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3004 sc->sc_align_tweak = 0;
3005 else
3006 sc->sc_align_tweak = 2;
3007 #endif /* __NO_STRICT_ALIGNMENT */
3008
3009 /* Cancel any pending I/O. */
3010 wm_stop(ifp, 0);
3011
3012 /* update statistics before reset */
3013 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3014 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3015
3016 /* Reset the chip to a known state. */
3017 wm_reset(sc);
3018
3019 /* Initialize the transmit descriptor ring. */
3020 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3021 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3022 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3023 sc->sc_txfree = WM_NTXDESC(sc);
3024 sc->sc_txnext = 0;
3025
3026 if (sc->sc_type < WM_T_82543) {
3027 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3028 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3029 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3030 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3031 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3032 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3033 } else {
3034 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3035 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3036 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3037 CSR_WRITE(sc, WMREG_TDH, 0);
3038 CSR_WRITE(sc, WMREG_TDT, 0);
3039 CSR_WRITE(sc, WMREG_TIDV, 375); /* ITR / 4 */
3040 CSR_WRITE(sc, WMREG_TADV, 375); /* should be same */
3041
3042 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3043 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3044 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3045 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3046 }
3047 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3048 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3049
3050 /* Initialize the transmit job descriptors. */
3051 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3052 sc->sc_txsoft[i].txs_mbuf = NULL;
3053 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3054 sc->sc_txsnext = 0;
3055 sc->sc_txsdirty = 0;
3056
3057 /*
3058 * Initialize the receive descriptor and receive job
3059 * descriptor rings.
3060 */
3061 if (sc->sc_type < WM_T_82543) {
3062 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3063 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3064 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3065 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3066 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3067 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3068
3069 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3070 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3071 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3072 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3073 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3074 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3075 } else {
3076 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3077 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3078 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3079 CSR_WRITE(sc, WMREG_RDH, 0);
3080 CSR_WRITE(sc, WMREG_RDT, 0);
3081 CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
3082 CSR_WRITE(sc, WMREG_RADV, 375); /* MUST be same */
3083 }
3084 for (i = 0; i < WM_NRXDESC; i++) {
3085 rxs = &sc->sc_rxsoft[i];
3086 if (rxs->rxs_mbuf == NULL) {
3087 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3088 log(LOG_ERR, "%s: unable to allocate or map rx "
3089 "buffer %d, error = %d\n",
3090 device_xname(&sc->sc_dev), i, error);
3091 /*
3092 * XXX Should attempt to run with fewer receive
3093 * XXX buffers instead of just failing.
3094 */
3095 wm_rxdrain(sc);
3096 goto out;
3097 }
3098 } else
3099 WM_INIT_RXDESC(sc, i);
3100 }
3101 sc->sc_rxptr = 0;
3102 sc->sc_rxdiscard = 0;
3103 WM_RXCHAIN_RESET(sc);
3104
3105 /*
3106 * Clear out the VLAN table -- we don't use it (yet).
3107 */
3108 CSR_WRITE(sc, WMREG_VET, 0);
3109 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3110 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3111
3112 /*
3113 * Set up flow-control parameters.
3114 *
3115 * XXX Values could probably stand some tuning.
3116 */
3117 if (sc->sc_type != WM_T_ICH8) {
3118 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3119 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3120 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3121 }
3122
3123 sc->sc_fcrtl = FCRTL_DFLT;
3124 if (sc->sc_type < WM_T_82543) {
3125 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3126 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3127 } else {
3128 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3129 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3130 }
3131 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3132
3133 #if 0 /* XXXJRT */
3134 /* Deal with VLAN enables. */
3135 if (VLAN_ATTACHED(&sc->sc_ethercom))
3136 sc->sc_ctrl |= CTRL_VME;
3137 else
3138 #endif /* XXXJRT */
3139 sc->sc_ctrl &= ~CTRL_VME;
3140
3141 /* Write the control registers. */
3142 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3143 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3144 int val;
3145 val = CSR_READ(sc, WMREG_CTRL_EXT);
3146 val &= ~CTRL_EXT_LINK_MODE_MASK;
3147 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3148
3149 		/* Bypass the Rx and Tx FIFOs. */
3150 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3151 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3152 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3153
3154 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3155 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3156 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3157 /*
3158 		 * Set the MAC to wait the maximum time between each
3159 		 * iteration, and increase the maximum iterations when
3160 		 * polling the PHY; this fixes erroneous timeouts at 10Mbps.
3161 */
3162 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3163 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3164 val |= 0x3F;
3165 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3166 }
3167 #if 0
3168 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3169 #endif
3170
3171 /*
3172 * Set up checksum offload parameters.
3173 */
3174 reg = CSR_READ(sc, WMREG_RXCSUM);
3175 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3176 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3177 reg |= RXCSUM_IPOFL;
3178 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3179 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3180 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3181 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3182 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3183
3184 /*
3185 * Set up the interrupt registers.
3186 */
3187 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3188 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3189 ICR_RXO | ICR_RXT0;
3190 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3191 sc->sc_icr |= ICR_RXCFG;
3192 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3193
3194 /* Set up the inter-packet gap. */
3195 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3196
3197 if (sc->sc_type >= WM_T_82543) {
3198 /*
3199 		 * Set up the interrupt throttling register (units of 256ns).
3200 		 * Note that a footnote in Intel's documentation says this
3201 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
3202 		 * or 10Mbit mode. Empirically, this also appears to be
3203 		 * true for the 1024ns units of the other
3204 * interrupt-related timer registers -- so, really, we ought
3205 * to divide this value by 4 when the link speed is low.
3206 *
3207 * XXX implement this division at link speed change!
3208 */
3209
3210 /*
3211 * For N interrupts/sec, set this value to:
3212 * 1000000000 / (N * 256). Note that we set the
3213 * absolute and packet timer values to this value
3214 * divided by 4 to get "simple timer" behavior.
3215 */
3216
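		/*
		 * Illustrative check of the formula above:
		 * 1000000000 / (2604 * 256) ~= 1500, so writing 1500
		 * yields roughly 2604 interrupts/sec.
		 */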
3217 sc->sc_itr = 1500; /* 2604 ints/sec */
3218 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3219 }
3220
3221 #if 0 /* XXXJRT */
3222 /* Set the VLAN ethernetype. */
3223 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3224 #endif
3225
3226 /*
3227 * Set up the transmit control register; we start out with
3228 	 * a collision distance suitable for FDX, but update it when
3229 * we resolve the media type.
3230 */
3231 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3232 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3233 if (sc->sc_type >= WM_T_82571)
3234 sc->sc_tctl |= TCTL_MULR;
3235 if (sc->sc_type >= WM_T_80003)
3236 sc->sc_tctl |= TCTL_RTLC;
3237 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3238
3239 /* Set the media. */
3240 if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
3241 goto out;
3242
3243 /*
3244 * Set up the receive control register; we actually program
3245 * the register when we set the receive filter. Use multicast
3246 * address offset type 0.
3247 *
3248 * Only the i82544 has the ability to strip the incoming
3249 * CRC, so we don't enable that feature.
3250 */
3251 sc->sc_mchash_type = 0;
3252 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3253 | RCTL_MO(sc->sc_mchash_type);
3254
3255 	/* The 82573 and ICH8 don't support jumbo frames. */
3256 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3257 sc->sc_rctl |= RCTL_LPE;
3258
3259 if (MCLBYTES == 2048) {
3260 sc->sc_rctl |= RCTL_2k;
3261 } else {
3262 if (sc->sc_type >= WM_T_82543) {
3263 			switch (MCLBYTES) {
3264 case 4096:
3265 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3266 break;
3267 case 8192:
3268 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3269 break;
3270 case 16384:
3271 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3272 break;
3273 default:
3274 panic("wm_init: MCLBYTES %d unsupported",
3275 MCLBYTES);
3276 break;
3277 }
3278 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3279 }
3280
3281 /* Set the receive filter. */
3282 wm_set_filter(sc);
3283
3284 /* Start the one second link check clock. */
3285 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3286
3287 /* ...all done! */
3288 ifp->if_flags |= IFF_RUNNING;
3289 ifp->if_flags &= ~IFF_OACTIVE;
3290
3291 out:
3292 if (error)
3293 log(LOG_ERR, "%s: interface not running\n",
3294 device_xname(&sc->sc_dev));
3295 return (error);
3296 }
3297
3298 /*
3299 * wm_rxdrain:
3300 *
3301 * Drain the receive queue.
3302 */
3303 static void
3304 wm_rxdrain(struct wm_softc *sc)
3305 {
3306 struct wm_rxsoft *rxs;
3307 int i;
3308
3309 for (i = 0; i < WM_NRXDESC; i++) {
3310 rxs = &sc->sc_rxsoft[i];
3311 if (rxs->rxs_mbuf != NULL) {
3312 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3313 m_freem(rxs->rxs_mbuf);
3314 rxs->rxs_mbuf = NULL;
3315 }
3316 }
3317 }
3318
3319 /*
3320 * wm_stop: [ifnet interface function]
3321 *
3322 * Stop transmission on the interface.
3323 */
3324 static void
3325 wm_stop(struct ifnet *ifp, int disable)
3326 {
3327 struct wm_softc *sc = ifp->if_softc;
3328 struct wm_txsoft *txs;
3329 int i;
3330
3331 /* Stop the one second clock. */
3332 callout_stop(&sc->sc_tick_ch);
3333
3334 /* Stop the 82547 Tx FIFO stall check timer. */
3335 if (sc->sc_type == WM_T_82547)
3336 callout_stop(&sc->sc_txfifo_ch);
3337
3338 if (sc->sc_flags & WM_F_HAS_MII) {
3339 /* Down the MII. */
3340 mii_down(&sc->sc_mii);
3341 }
3342
3343 /* Stop the transmit and receive processes. */
3344 CSR_WRITE(sc, WMREG_TCTL, 0);
3345 CSR_WRITE(sc, WMREG_RCTL, 0);
3346
3347 /*
3348 * Clear the interrupt mask to ensure the device cannot assert its
3349 * interrupt line.
3350 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3351 * any currently pending or shared interrupt.
3352 */
3353 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3354 sc->sc_icr = 0;
3355
3356 /* Release any queued transmit buffers. */
3357 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3358 txs = &sc->sc_txsoft[i];
3359 if (txs->txs_mbuf != NULL) {
3360 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3361 m_freem(txs->txs_mbuf);
3362 txs->txs_mbuf = NULL;
3363 }
3364 }
3365
3366 /* Mark the interface as down and cancel the watchdog timer. */
3367 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3368 ifp->if_timer = 0;
3369
3370 if (disable)
3371 wm_rxdrain(sc);
3372 }
3373
3374 void
3375 wm_get_auto_rd_done(struct wm_softc *sc)
3376 {
3377 int i;
3378
3379 /* wait for eeprom to reload */
3380 switch (sc->sc_type) {
3381 case WM_T_82571:
3382 case WM_T_82572:
3383 case WM_T_82573:
3384 case WM_T_80003:
3385 case WM_T_ICH8:
3386 case WM_T_ICH9:
3387 for (i = 10; i > 0; i--) {
3388 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3389 break;
3390 delay(1000);
3391 }
3392 if (i == 0) {
3393 log(LOG_ERR, "%s: auto read from eeprom failed to "
3394 "complete\n", device_xname(&sc->sc_dev));
3395 }
3396 break;
3397 default:
3398 delay(5000);
3399 break;
3400 }
3401
3402 /* Phy configuration starts after EECD_AUTO_RD is set */
3403 if (sc->sc_type == WM_T_82573)
3404 delay(25000);
3405 }
3406
3407 /*
3408 * wm_acquire_eeprom:
3409 *
3410 * Perform the EEPROM handshake required on some chips.
3411 */
3412 static int
3413 wm_acquire_eeprom(struct wm_softc *sc)
3414 {
3415 uint32_t reg;
3416 int x;
3417 int ret = 0;
3418
3419 	/* Flash-type EEPROM needs no handshake. */
3420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3421 return 0;
3422
3423 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3424 ret = wm_get_swfwhw_semaphore(sc);
3425 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3426 /* this will also do wm_get_swsm_semaphore() if needed */
3427 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3428 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3429 ret = wm_get_swsm_semaphore(sc);
3430 }
3431
3432 if (ret)
3433 return 1;
3434
3435 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3436 reg = CSR_READ(sc, WMREG_EECD);
3437
3438 /* Request EEPROM access. */
3439 reg |= EECD_EE_REQ;
3440 CSR_WRITE(sc, WMREG_EECD, reg);
3441
3442 /* ..and wait for it to be granted. */
3443 for (x = 0; x < 1000; x++) {
3444 reg = CSR_READ(sc, WMREG_EECD);
3445 if (reg & EECD_EE_GNT)
3446 break;
3447 delay(5);
3448 }
3449 if ((reg & EECD_EE_GNT) == 0) {
3450 aprint_error_dev(&sc->sc_dev, "could not acquire EEPROM GNT\n");
3451 reg &= ~EECD_EE_REQ;
3452 CSR_WRITE(sc, WMREG_EECD, reg);
3453 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3454 wm_put_swfwhw_semaphore(sc);
3455 if (sc->sc_flags & WM_F_SWFW_SYNC)
3456 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3457 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3458 wm_put_swsm_semaphore(sc);
3459 return (1);
3460 }
3461 }
3462
3463 return (0);
3464 }
3465
3466 /*
3467 * wm_release_eeprom:
3468 *
3469 * Release the EEPROM mutex.
3470 */
3471 static void
3472 wm_release_eeprom(struct wm_softc *sc)
3473 {
3474 uint32_t reg;
3475
3476 	/* Flash-type EEPROM has nothing to release. */
3477 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3478 return;
3479
3480 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3481 reg = CSR_READ(sc, WMREG_EECD);
3482 reg &= ~EECD_EE_REQ;
3483 CSR_WRITE(sc, WMREG_EECD, reg);
3484 }
3485
3486 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3487 wm_put_swfwhw_semaphore(sc);
3488 if (sc->sc_flags & WM_F_SWFW_SYNC)
3489 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3490 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3491 wm_put_swsm_semaphore(sc);
3492 }
3493
3494 /*
3495 * wm_eeprom_sendbits:
3496 *
3497 * Send a series of bits to the EEPROM.
3498 */
3499 static void
3500 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3501 {
3502 uint32_t reg;
3503 int x;
3504
3505 reg = CSR_READ(sc, WMREG_EECD);
3506
3507 for (x = nbits; x > 0; x--) {
3508 if (bits & (1U << (x - 1)))
3509 reg |= EECD_DI;
3510 else
3511 reg &= ~EECD_DI;
3512 CSR_WRITE(sc, WMREG_EECD, reg);
3513 delay(2);
3514 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3515 delay(2);
3516 CSR_WRITE(sc, WMREG_EECD, reg);
3517 delay(2);
3518 }
3519 }
3520
3521 /*
3522 * wm_eeprom_recvbits:
3523 *
3524 * Receive a series of bits from the EEPROM.
3525 */
3526 static void
3527 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3528 {
3529 uint32_t reg, val;
3530 int x;
3531
3532 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3533
3534 val = 0;
3535 for (x = nbits; x > 0; x--) {
3536 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3537 delay(2);
3538 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3539 val |= (1U << (x - 1));
3540 CSR_WRITE(sc, WMREG_EECD, reg);
3541 delay(2);
3542 }
3543 *valp = val;
3544 }
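
/*
 * Note: both helpers above clock bits MSB first, toggling EECD_SK
 * around each bit with roughly 2us of settle time on either edge.
 */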
3545
3546 /*
3547 * wm_read_eeprom_uwire:
3548 *
3549 * Read a word from the EEPROM using the MicroWire protocol.
3550 */
3551 static int
3552 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3553 {
3554 uint32_t reg, val;
3555 int i;
3556
3557 for (i = 0; i < wordcnt; i++) {
3558 /* Clear SK and DI. */
3559 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3560 CSR_WRITE(sc, WMREG_EECD, reg);
3561
3562 /* Set CHIP SELECT. */
3563 reg |= EECD_CS;
3564 CSR_WRITE(sc, WMREG_EECD, reg);
3565 delay(2);
3566
3567 /* Shift in the READ command. */
3568 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3569
3570 /* Shift in address. */
3571 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3572
3573 /* Shift out the data. */
3574 wm_eeprom_recvbits(sc, &val, 16);
3575 data[i] = val & 0xffff;
3576
3577 /* Clear CHIP SELECT. */
3578 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3579 CSR_WRITE(sc, WMREG_EECD, reg);
3580 delay(2);
3581 }
3582
3583 return (0);
3584 }
3585
3586 /*
3587 * wm_spi_eeprom_ready:
3588 *
3589 * Wait for a SPI EEPROM to be ready for commands.
3590 */
3591 static int
3592 wm_spi_eeprom_ready(struct wm_softc *sc)
3593 {
3594 uint32_t val;
3595 int usec;
3596
3597 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3598 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3599 wm_eeprom_recvbits(sc, &val, 8);
3600 if ((val & SPI_SR_RDY) == 0)
3601 break;
3602 }
3603 if (usec >= SPI_MAX_RETRIES) {
3604 aprint_error_dev(&sc->sc_dev, "EEPROM failed to become ready\n");
3605 return (1);
3606 }
3607 return (0);
3608 }
3609
3610 /*
3611 * wm_read_eeprom_spi:
3612 *
3613  * Read a word from the EEPROM using the SPI protocol.
3614 */
3615 static int
3616 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3617 {
3618 uint32_t reg, val;
3619 int i;
3620 uint8_t opc;
3621
3622 /* Clear SK and CS. */
3623 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3624 CSR_WRITE(sc, WMREG_EECD, reg);
3625 delay(2);
3626
3627 if (wm_spi_eeprom_ready(sc))
3628 return (1);
3629
3630 /* Toggle CS to flush commands. */
3631 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3632 delay(2);
3633 CSR_WRITE(sc, WMREG_EECD, reg);
3634 delay(2);
3635
3636 opc = SPI_OPC_READ;
3637 if (sc->sc_ee_addrbits == 8 && word >= 128)
3638 opc |= SPI_OPC_A8;
3639
3640 wm_eeprom_sendbits(sc, opc, 8);
3641 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3642
3643 for (i = 0; i < wordcnt; i++) {
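		/*
		 * The EEPROM streams each word low byte first; swap the
		 * two bytes to reassemble the 16-bit value.
		 */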
3644 wm_eeprom_recvbits(sc, &val, 16);
3645 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3646 }
3647
3648 /* Raise CS and clear SK. */
3649 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3650 CSR_WRITE(sc, WMREG_EECD, reg);
3651 delay(2);
3652
3653 return (0);
3654 }
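
/*
 * Two details of the transaction above are easy to miss: the word
 * address is shifted left once because the part is byte-addressed,
 * and each received 16-bit word is byte-swapped because the first
 * byte clocked in lands in the high half of val.  A sketch of the
 * swap (illustrative):
 */
#if 0
static uint16_t
spi_swap_example(uint32_t val)
{

	/* e.g. 0xBA34 as clocked in becomes the stored word 0x34BA */
	return ((val >> 8) & 0xff) | ((val & 0xff) << 8);
}
#endif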
3655
3656 #define EEPROM_CHECKSUM 0xBABA
3657 #define EEPROM_SIZE 0x0040
3658
3659 /*
3660  * wm_validate_eeprom_checksum:
3661  *
3662  * The sum of the first 64 16-bit words must equal EEPROM_CHECKSUM (0xBABA).
3663 */
3664 static int
3665 wm_validate_eeprom_checksum(struct wm_softc *sc)
3666 {
3667 uint16_t checksum;
3668 uint16_t eeprom_data;
3669 int i;
3670
3671 checksum = 0;
3672
3673 for (i = 0; i < EEPROM_SIZE; i++) {
3674 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3675 return 1;
3676 checksum += eeprom_data;
3677 }
3678
3679 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3680 return 1;
3681
3682 return 0;
3683 }
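
/*
 * In other words, the vendor picks the checksum word so that the
 * 64-word sum wraps to exactly 0xBABA.  A sketch of that computation
 * (illustrative only; the checksum word's offset is not assumed):
 */
#if 0
static uint16_t
eeprom_checksum_word(const uint16_t *words)	/* the other 63 words */
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_SIZE - 1; i++)
		sum += words[i];
	return (uint16_t)(EEPROM_CHECKSUM - sum);	/* mod-2^16 */
}
#endif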
3684
3685 /*
3686 * wm_read_eeprom:
3687 *
3688 * Read data from the serial EEPROM.
3689 */
3690 static int
3691 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3692 {
3693 int rv;
3694
3695 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3696 return 1;
3697
3698 if (wm_acquire_eeprom(sc))
3699 return 1;
3700
3701 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3702 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3703 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3704 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3705 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3706 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3707 else
3708 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3709
3710 wm_release_eeprom(sc);
3711 return rv;
3712 }
3713
3714 static int
3715 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3716 uint16_t *data)
3717 {
3718 int i, eerd = 0;
3719 int error = 0;
3720
3721 for (i = 0; i < wordcnt; i++) {
3722 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3723
3724 CSR_WRITE(sc, WMREG_EERD, eerd);
3725 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3726 if (error != 0)
3727 break;
3728
3729 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3730 }
3731
3732 return error;
3733 }
3734
3735 static int
3736 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3737 {
3738 uint32_t attempts = 100000;
3739 uint32_t i, reg = 0;
3740 int32_t done = -1;
3741
3742 for (i = 0; i < attempts; i++) {
3743 reg = CSR_READ(sc, rw);
3744
3745 if (reg & EERD_DONE) {
3746 done = 0;
3747 break;
3748 }
3749 delay(5);
3750 }
3751
3752 return done;
3753 }
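
/*
 * The EERD path needs no bit-banging at all: the word address is
 * written together with EERD_START, the DONE bit is polled (up to
 * 100000 tries, 5 us apart), and the result is taken from the data
 * field of the same register.
 */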
3754
3755 /*
3756 * wm_add_rxbuf:
3757 *
3758  *	Add a receive buffer to the indicated descriptor.
3759 */
3760 static int
3761 wm_add_rxbuf(struct wm_softc *sc, int idx)
3762 {
3763 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3764 struct mbuf *m;
3765 int error;
3766
3767 MGETHDR(m, M_DONTWAIT, MT_DATA);
3768 if (m == NULL)
3769 return (ENOBUFS);
3770
3771 MCLGET(m, M_DONTWAIT);
3772 if ((m->m_flags & M_EXT) == 0) {
3773 m_freem(m);
3774 return (ENOBUFS);
3775 }
3776
3777 if (rxs->rxs_mbuf != NULL)
3778 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3779
3780 rxs->rxs_mbuf = m;
3781
3782 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3783 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3784 BUS_DMA_READ|BUS_DMA_NOWAIT);
3785 if (error) {
3786 /* XXX XXX XXX */
3787 aprint_error_dev(&sc->sc_dev, "unable to load rx DMA map %d, error = %d\n",
3788 idx, error);
3789 panic("wm_add_rxbuf");
3790 }
3791
3792 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3793 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3794
3795 WM_INIT_RXDESC(sc, idx);
3796
3797 return (0);
3798 }
3799
3800 /*
3801 * wm_set_ral:
3802 *
3803  *	Set an entry in the receive address list.
3804 */
3805 static void
3806 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3807 {
3808 uint32_t ral_lo, ral_hi;
3809
3810 if (enaddr != NULL) {
3811 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3812 (enaddr[3] << 24);
3813 ral_hi = enaddr[4] | (enaddr[5] << 8);
3814 ral_hi |= RAL_AV;
3815 } else {
3816 ral_lo = 0;
3817 ral_hi = 0;
3818 }
3819
3820 if (sc->sc_type >= WM_T_82544) {
3821 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3822 ral_lo);
3823 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3824 ral_hi);
3825 } else {
3826 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3827 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3828 }
3829 }
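
/*
 * The address is packed little-endian, so byte 0 of the Ethernet
 * address ends up in the low byte of RAL.  For example (illustrative):
 */
#if 0
static void
ral_pack_example(void)
{
	static const uint8_t ea[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t lo, hi;

	lo = ea[0] | (ea[1] << 8) | (ea[2] << 16) | (ea[3] << 24);
	hi = ea[4] | (ea[5] << 8);
	/* lo == 0x33221100, hi == 0x00005544; RAL_AV is then OR'd in. */
}
#endif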
3830
3831 /*
3832 * wm_mchash:
3833 *
3834  *	Compute the hash of the multicast address for the 4096-bit
3835  *	multicast filter (1024-bit on ICH8).
3836 */
3837 static uint32_t
3838 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3839 {
3840 static const int lo_shift[4] = { 4, 3, 2, 0 };
3841 static const int hi_shift[4] = { 4, 5, 6, 8 };
3842 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3843 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3844 uint32_t hash;
3845
3846 if (sc->sc_type == WM_T_ICH8) {
3847 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3848 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3849 return (hash & 0x3ff);
3850 }
3851 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3852 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3853
3854 return (hash & 0xfff);
3855 }
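
/*
 * Worked example for filter type 0 on non-ICH8 parts: an address
 * ending in ...:AB:CD hashes to
 *
 *	(0xAB >> 4) | (0xCD << 4) = 0x00A | 0xCD0 = 0xCDA
 *
 * i.e. a 12-bit index into the 4096-bit filter.
 */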
3856
3857 /*
3858 * wm_set_filter:
3859 *
3860 * Set up the receive filter.
3861 */
3862 static void
3863 wm_set_filter(struct wm_softc *sc)
3864 {
3865 struct ethercom *ec = &sc->sc_ethercom;
3866 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3867 struct ether_multi *enm;
3868 struct ether_multistep step;
3869 bus_addr_t mta_reg;
3870 uint32_t hash, reg, bit;
3871 int i, size;
3872
3873 if (sc->sc_type >= WM_T_82544)
3874 mta_reg = WMREG_CORDOVA_MTA;
3875 else
3876 mta_reg = WMREG_MTA;
3877
3878 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3879
3880 if (ifp->if_flags & IFF_BROADCAST)
3881 sc->sc_rctl |= RCTL_BAM;
3882 if (ifp->if_flags & IFF_PROMISC) {
3883 sc->sc_rctl |= RCTL_UPE;
3884 goto allmulti;
3885 }
3886
3887 /*
3888 * Set the station address in the first RAL slot, and
3889 * clear the remaining slots.
3890 */
3891 if (sc->sc_type == WM_T_ICH8)
3892 size = WM_ICH8_RAL_TABSIZE;
3893 else
3894 size = WM_RAL_TABSIZE;
3895 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3896 for (i = 1; i < size; i++)
3897 wm_set_ral(sc, NULL, i);
3898
3899 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3900 size = WM_ICH8_MC_TABSIZE;
3901 else
3902 size = WM_MC_TABSIZE;
3903 /* Clear out the multicast table. */
3904 for (i = 0; i < size; i++)
3905 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3906
3907 ETHER_FIRST_MULTI(step, ec, enm);
3908 while (enm != NULL) {
3909 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3910 /*
3911 * We must listen to a range of multicast addresses.
3912 * For now, just accept all multicasts, rather than
3913 * trying to set only those filter bits needed to match
3914 * the range. (At this time, the only use of address
3915 * ranges is for IP multicast routing, for which the
3916 * range is big enough to require all bits set.)
3917 */
3918 goto allmulti;
3919 }
3920
3921 hash = wm_mchash(sc, enm->enm_addrlo);
3922
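		/*
		 * Split the hash into a multicast-table word index and a
		 * bit number; e.g. hash 0xCDA selects word 0x66, bit 0x1a.
		 */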
3923 reg = (hash >> 5);
3924 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3925 reg &= 0x1f;
3926 else
3927 reg &= 0x7f;
3928 bit = hash & 0x1f;
3929
3930 hash = CSR_READ(sc, mta_reg + (reg << 2));
3931 hash |= 1U << bit;
3932
3933 /* XXX Hardware bug?? */
3934 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3935 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3936 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3937 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3938 } else
3939 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3940
3941 ETHER_NEXT_MULTI(step, enm);
3942 }
3943
3944 ifp->if_flags &= ~IFF_ALLMULTI;
3945 goto setit;
3946
3947 allmulti:
3948 ifp->if_flags |= IFF_ALLMULTI;
3949 sc->sc_rctl |= RCTL_MPE;
3950
3951 setit:
3952 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3953 }
3954
3955 /*
3956 * wm_tbi_mediainit:
3957 *
3958 * Initialize media for use on 1000BASE-X devices.
3959 */
3960 static void
3961 wm_tbi_mediainit(struct wm_softc *sc)
3962 {
3963 const char *sep = "";
3964
3965 if (sc->sc_type < WM_T_82543)
3966 sc->sc_tipg = TIPG_WM_DFLT;
3967 else
3968 sc->sc_tipg = TIPG_LG_DFLT;
3969
3970 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3971 wm_tbi_mediastatus);
3972
3973 /*
3974 * SWD Pins:
3975 *
3976 * 0 = Link LED (output)
3977 * 1 = Loss Of Signal (input)
3978 */
3979 sc->sc_ctrl |= CTRL_SWDPIO(0);
3980 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3981
3982 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3983
3984 #define ADD(ss, mm, dd) \
3985 do { \
3986 aprint_normal("%s%s", sep, ss); \
3987 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3988 sep = ", "; \
3989 } while (/*CONSTCOND*/0)
3990
3991 aprint_normal_dev(&sc->sc_dev, "");
3992 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3993 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3994 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3995 aprint_normal("\n");
3996
3997 #undef ADD
3998
3999 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4000 }
4001
4002 /*
4003 * wm_tbi_mediastatus: [ifmedia interface function]
4004 *
4005 * Get the current interface media status on a 1000BASE-X device.
4006 */
4007 static void
4008 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4009 {
4010 struct wm_softc *sc = ifp->if_softc;
4011 uint32_t ctrl;
4012
4013 ifmr->ifm_status = IFM_AVALID;
4014 ifmr->ifm_active = IFM_ETHER;
4015
4016 if (sc->sc_tbi_linkup == 0) {
4017 ifmr->ifm_active |= IFM_NONE;
4018 return;
4019 }
4020
4021 ifmr->ifm_status |= IFM_ACTIVE;
4022 ifmr->ifm_active |= IFM_1000_SX;
4023 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4024 ifmr->ifm_active |= IFM_FDX;
4025 ctrl = CSR_READ(sc, WMREG_CTRL);
4026 if (ctrl & CTRL_RFCE)
4027 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4028 if (ctrl & CTRL_TFCE)
4029 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4030 }
4031
4032 /*
4033 * wm_tbi_mediachange: [ifmedia interface function]
4034 *
4035 * Set hardware to newly-selected media on a 1000BASE-X device.
4036 */
4037 static int
4038 wm_tbi_mediachange(struct ifnet *ifp)
4039 {
4040 struct wm_softc *sc = ifp->if_softc;
4041 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4042 uint32_t status;
4043 int i;
4044
4045 sc->sc_txcw = ife->ifm_data;
4046 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4047 device_xname(&sc->sc_dev),sc->sc_txcw));
4048 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4049 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4050 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4051 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4052 sc->sc_txcw |= TXCW_ANE;
4053 } else {
4054 		/* If autonegotiation is turned off, force link up and turn on full duplex. */
4055 sc->sc_txcw &= ~TXCW_ANE;
4056 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4057 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4058 delay(1000);
4059 }
4060
4061 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4062 device_xname(&sc->sc_dev),sc->sc_txcw));
4063 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4064 delay(10000);
4065
4066 /* NOTE: CTRL will update TFCE and RFCE automatically. */
4067
4068 sc->sc_tbi_anstate = 0;
4069
4070 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4071 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(&sc->sc_dev),i));
4072
4073 /*
4074 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4075 * optics detect a signal, 0 if they don't.
4076 */
4077 if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4078 /* Have signal; wait for the link to come up. */
4079
4080 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4081 /*
4082 * Reset the link, and let autonegotiation do its thing
4083 */
4084 sc->sc_ctrl |= CTRL_LRST;
4085 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4086 delay(1000);
4087 sc->sc_ctrl &= ~CTRL_LRST;
4088 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4089 delay(1000);
4090 }
4091
4092 for (i = 0; i < 50; i++) {
4093 delay(10000);
4094 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4095 break;
4096 }
4097
4098 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4099 device_xname(&sc->sc_dev),i));
4100
4101 status = CSR_READ(sc, WMREG_STATUS);
4102 DPRINTF(WM_DEBUG_LINK,
4103 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4104 device_xname(&sc->sc_dev),status, STATUS_LU));
4105 if (status & STATUS_LU) {
4106 /* Link is up. */
4107 DPRINTF(WM_DEBUG_LINK,
4108 ("%s: LINK: set media -> link up %s\n",
4109 device_xname(&sc->sc_dev),
4110 (status & STATUS_FD) ? "FDX" : "HDX"));
4111 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4112 sc->sc_fcrtl &= ~FCRTL_XONE;
4113 if (status & STATUS_FD)
4114 sc->sc_tctl |=
4115 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4116 else
4117 sc->sc_tctl |=
4118 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4119 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4120 sc->sc_fcrtl |= FCRTL_XONE;
4121 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4122 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4123 WMREG_OLD_FCRTL : WMREG_FCRTL,
4124 sc->sc_fcrtl);
4125 sc->sc_tbi_linkup = 1;
4126 } else {
4127 /* Link is down. */
4128 DPRINTF(WM_DEBUG_LINK,
4129 ("%s: LINK: set media -> link down\n",
4130 device_xname(&sc->sc_dev)));
4131 sc->sc_tbi_linkup = 0;
4132 }
4133 } else {
4134 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4135 device_xname(&sc->sc_dev)));
4136 sc->sc_tbi_linkup = 0;
4137 }
4138
4139 wm_tbi_set_linkled(sc);
4140
4141 return (0);
4142 }
4143
4144 /*
4145 * wm_tbi_set_linkled:
4146 *
4147 * Update the link LED on 1000BASE-X devices.
4148 */
4149 static void
4150 wm_tbi_set_linkled(struct wm_softc *sc)
4151 {
4152
4153 if (sc->sc_tbi_linkup)
4154 sc->sc_ctrl |= CTRL_SWDPIN(0);
4155 else
4156 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4157
4158 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4159 }
4160
4161 /*
4162 * wm_tbi_check_link:
4163 *
4164 * Check the link on 1000BASE-X devices.
4165 */
4166 static void
4167 wm_tbi_check_link(struct wm_softc *sc)
4168 {
4169 uint32_t rxcw, ctrl, status;
4170
4171 if (sc->sc_tbi_anstate == 0)
4172 return;
4173 else if (sc->sc_tbi_anstate > 1) {
4174 DPRINTF(WM_DEBUG_LINK,
4175 ("%s: LINK: anstate %d\n", device_xname(&sc->sc_dev),
4176 sc->sc_tbi_anstate));
4177 sc->sc_tbi_anstate--;
4178 return;
4179 }
4180
4181 sc->sc_tbi_anstate = 0;
4182
4183 rxcw = CSR_READ(sc, WMREG_RXCW);
4184 ctrl = CSR_READ(sc, WMREG_CTRL);
4185 status = CSR_READ(sc, WMREG_STATUS);
4186
4187 if ((status & STATUS_LU) == 0) {
4188 DPRINTF(WM_DEBUG_LINK,
4189 ("%s: LINK: checklink -> down\n", device_xname(&sc->sc_dev)));
4190 sc->sc_tbi_linkup = 0;
4191 } else {
4192 DPRINTF(WM_DEBUG_LINK,
4193 ("%s: LINK: checklink -> up %s\n", device_xname(&sc->sc_dev),
4194 (status & STATUS_FD) ? "FDX" : "HDX"));
4195 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4196 sc->sc_fcrtl &= ~FCRTL_XONE;
4197 if (status & STATUS_FD)
4198 sc->sc_tctl |=
4199 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4200 else
4201 sc->sc_tctl |=
4202 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4203 if (ctrl & CTRL_TFCE)
4204 sc->sc_fcrtl |= FCRTL_XONE;
4205 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4206 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4207 WMREG_OLD_FCRTL : WMREG_FCRTL,
4208 sc->sc_fcrtl);
4209 sc->sc_tbi_linkup = 1;
4210 }
4211
4212 wm_tbi_set_linkled(sc);
4213 }
4214
4215 /*
4216 * wm_gmii_reset:
4217 *
4218 * Reset the PHY.
4219 */
4220 static void
4221 wm_gmii_reset(struct wm_softc *sc)
4222 {
4223 uint32_t reg;
4224 int func = 0; /* XXX gcc */
4225
4226 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4227 if (wm_get_swfwhw_semaphore(sc))
4228 return;
4229 }
4230 if (sc->sc_type == WM_T_80003) {
4231 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4232 if (wm_get_swfw_semaphore(sc,
4233 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4234 return;
4235 }
4236 if (sc->sc_type >= WM_T_82544) {
4237 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4238 delay(20000);
4239
4240 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4241 delay(20000);
4242 } else {
4243 /*
4244 * With 82543, we need to force speed and duplex on the MAC
4245 * equal to what the PHY speed and duplex configuration is.
4246 * In addition, we need to perform a hardware reset on the PHY
4247 * to take it out of reset.
4248 */
4249 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4250 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4251
4252 /* The PHY reset pin is active-low. */
4253 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4254 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4255 CTRL_EXT_SWDPIN(4));
4256 reg |= CTRL_EXT_SWDPIO(4);
4257
4258 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4259 delay(10);
4260
4261 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4262 delay(10000);
4263
4264 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4265 delay(10);
4266 #if 0
4267 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4268 #endif
4269 }
4270 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4271 wm_put_swfwhw_semaphore(sc);
4272 if (sc->sc_type == WM_T_80003)
4273 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4274 }
4275
4276 /*
4277 * wm_gmii_mediainit:
4278 *
4279 * Initialize media for use on 1000BASE-T devices.
4280 */
4281 static void
4282 wm_gmii_mediainit(struct wm_softc *sc)
4283 {
4284 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4285
4286 /* We have MII. */
4287 sc->sc_flags |= WM_F_HAS_MII;
4288
4289 if (sc->sc_type >= WM_T_80003)
4290 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4291 else
4292 sc->sc_tipg = TIPG_1000T_DFLT;
4293
4294 /*
4295 * Let the chip set speed/duplex on its own based on
4296 * signals from the PHY.
4297 * XXXbouyer - I'm not sure this is right for the 80003,
4298 * the em driver only sets CTRL_SLU here - but it seems to work.
4299 */
4300 sc->sc_ctrl |= CTRL_SLU;
4301 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4302
4303 /* Initialize our media structures and probe the GMII. */
4304 sc->sc_mii.mii_ifp = ifp;
4305
4306 if (sc->sc_type >= WM_T_80003) {
4307 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4308 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4309 } else if (sc->sc_type >= WM_T_82544) {
4310 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4311 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4312 } else {
4313 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4314 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4315 }
4316 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4317
4318 wm_gmii_reset(sc);
4319
4320 sc->sc_ethercom.ec_mii = &sc->sc_mii;
4321 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4322 wm_gmii_mediastatus);
4323
4324 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4325 MII_OFFSET_ANY, MIIF_DOPAUSE);
4326 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4327 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4328 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4329 } else
4330 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4331 }
4332
4333 /*
4334 * wm_gmii_mediastatus: [ifmedia interface function]
4335 *
4336 * Get the current interface media status on a 1000BASE-T device.
4337 */
4338 static void
4339 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4340 {
4341 struct wm_softc *sc = ifp->if_softc;
4342
4343 ether_mediastatus(ifp, ifmr);
4344 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) |
4345 sc->sc_flowflags;
4346 }
4347
4348 /*
4349 * wm_gmii_mediachange: [ifmedia interface function]
4350 *
4351 * Set hardware to newly-selected media on a 1000BASE-T device.
4352 */
4353 static int
4354 wm_gmii_mediachange(struct ifnet *ifp)
4355 {
4356 struct wm_softc *sc = ifp->if_softc;
4357 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4358 int rc;
4359
4360 if ((ifp->if_flags & IFF_UP) == 0)
4361 return 0;
4362
4363 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4364 sc->sc_ctrl |= CTRL_SLU;
4365 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4366 || (sc->sc_type > WM_T_82543)) {
4367 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4368 } else {
4369 sc->sc_ctrl &= ~CTRL_ASDE;
4370 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4371 if (ife->ifm_media & IFM_FDX)
4372 sc->sc_ctrl |= CTRL_FD;
4373 		switch (IFM_SUBTYPE(ife->ifm_media)) {
4374 case IFM_10_T:
4375 sc->sc_ctrl |= CTRL_SPEED_10;
4376 break;
4377 case IFM_100_TX:
4378 sc->sc_ctrl |= CTRL_SPEED_100;
4379 break;
4380 case IFM_1000_T:
4381 sc->sc_ctrl |= CTRL_SPEED_1000;
4382 break;
4383 default:
4384 panic("wm_gmii_mediachange: bad media 0x%x",
4385 ife->ifm_media);
4386 }
4387 }
4388 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4389 if (sc->sc_type <= WM_T_82543)
4390 wm_gmii_reset(sc);
4391
4392 if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
4393 return 0;
4394 return rc;
4395 }
4396
4397 #define MDI_IO CTRL_SWDPIN(2)
4398 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4399 #define MDI_CLK CTRL_SWDPIN(3)
4400
4401 static void
4402 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4403 {
4404 uint32_t i, v;
4405
4406 v = CSR_READ(sc, WMREG_CTRL);
4407 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4408 v |= MDI_DIR | CTRL_SWDPIO(3);
4409
4410 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4411 if (data & i)
4412 v |= MDI_IO;
4413 else
4414 v &= ~MDI_IO;
4415 CSR_WRITE(sc, WMREG_CTRL, v);
4416 delay(10);
4417 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4418 delay(10);
4419 CSR_WRITE(sc, WMREG_CTRL, v);
4420 delay(10);
4421 }
4422 }
4423
4424 static uint32_t
4425 i82543_mii_recvbits(struct wm_softc *sc)
4426 {
4427 uint32_t v, i, data = 0;
4428
4429 v = CSR_READ(sc, WMREG_CTRL);
4430 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4431 v |= CTRL_SWDPIO(3);
4432
4433 CSR_WRITE(sc, WMREG_CTRL, v);
4434 delay(10);
4435 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4436 delay(10);
4437 CSR_WRITE(sc, WMREG_CTRL, v);
4438 delay(10);
4439
4440 for (i = 0; i < 16; i++) {
4441 data <<= 1;
4442 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4443 delay(10);
4444 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4445 data |= 1;
4446 CSR_WRITE(sc, WMREG_CTRL, v);
4447 delay(10);
4448 }
4449
4450 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4451 delay(10);
4452 CSR_WRITE(sc, WMREG_CTRL, v);
4453 delay(10);
4454
4455 return (data);
4456 }
4457
4458 #undef MDI_IO
4459 #undef MDI_DIR
4460 #undef MDI_CLK
4461
4462 /*
4463 * wm_gmii_i82543_readreg: [mii interface function]
4464 *
4465 * Read a PHY register on the GMII (i82543 version).
4466 */
4467 static int
4468 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
4469 {
4470 struct wm_softc *sc = device_private(self);
4471 int rv;
4472
4473 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4474 i82543_mii_sendbits(sc, reg | (phy << 5) |
4475 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4476 rv = i82543_mii_recvbits(sc) & 0xffff;
4477
4478 DPRINTF(WM_DEBUG_GMII,
4479 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4480 device_xname(&sc->sc_dev), phy, reg, rv));
4481
4482 return (rv);
4483 }
4484
4485 /*
4486 * wm_gmii_i82543_writereg: [mii interface function]
4487 *
4488 * Write a PHY register on the GMII (i82543 version).
4489 */
4490 static void
4491 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
4492 {
4493 struct wm_softc *sc = device_private(self);
4494
4495 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4496 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4497 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4498 (MII_COMMAND_START << 30), 32);
4499 }
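
/*
 * The 32-bit write frame bit-banged above follows IEEE 802.3 clause 22
 * management framing, MSB first (the read path drives only the first
 * 14 bits and then turns the bus around):
 *
 *	[31:30] start  [29:28] opcode  [27:23] PHY address
 *	[22:18] register  [17:16] turnaround  [15:0] data
 *
 * preceded by a 32-bit preamble of all ones.
 */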
4500
4501 /*
4502 * wm_gmii_i82544_readreg: [mii interface function]
4503 *
4504 * Read a PHY register on the GMII.
4505 */
4506 static int
4507 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
4508 {
4509 struct wm_softc *sc = device_private(self);
4510 uint32_t mdic = 0;
4511 int i, rv;
4512
4513 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4514 MDIC_REGADD(reg));
4515
4516 for (i = 0; i < 320; i++) {
4517 mdic = CSR_READ(sc, WMREG_MDIC);
4518 if (mdic & MDIC_READY)
4519 break;
4520 delay(10);
4521 }
4522
4523 if ((mdic & MDIC_READY) == 0) {
4524 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4525 device_xname(&sc->sc_dev), phy, reg);
4526 rv = 0;
4527 } else if (mdic & MDIC_E) {
4528 #if 0 /* This is normal if no PHY is present. */
4529 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4530 device_xname(&sc->sc_dev), phy, reg);
4531 #endif
4532 rv = 0;
4533 } else {
4534 rv = MDIC_DATA(mdic);
4535 if (rv == 0xffff)
4536 rv = 0;
4537 }
4538
4539 return (rv);
4540 }
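
/*
 * The MDIC register runs the whole clause 22 transaction in hardware:
 * a single write kicks it off, and the loop above polls MDIC_READY
 * every 10 us, i.e. a worst-case wait of roughly 3.2 ms per access.
 */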
4541
4542 /*
4543 * wm_gmii_i82544_writereg: [mii interface function]
4544 *
4545 * Write a PHY register on the GMII.
4546 */
4547 static void
4548 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
4549 {
4550 struct wm_softc *sc = device_private(self);
4551 uint32_t mdic = 0;
4552 int i;
4553
4554 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4555 MDIC_REGADD(reg) | MDIC_DATA(val));
4556
4557 for (i = 0; i < 320; i++) {
4558 mdic = CSR_READ(sc, WMREG_MDIC);
4559 if (mdic & MDIC_READY)
4560 break;
4561 delay(10);
4562 }
4563
4564 if ((mdic & MDIC_READY) == 0)
4565 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4566 device_xname(&sc->sc_dev), phy, reg);
4567 else if (mdic & MDIC_E)
4568 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4569 device_xname(&sc->sc_dev), phy, reg);
4570 }
4571
4572 /*
4573 * wm_gmii_i80003_readreg: [mii interface function]
4574 *
4575  * Read a PHY register on the Kumeran interface.
4576  * This could be handled by the PHY layer if we didn't have to lock the
4577  * resource ...
4578 */
4579 static int
4580 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
4581 {
4582 struct wm_softc *sc = device_private(self);
4583 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4584 int rv;
4585
4586 if (phy != 1) /* only one PHY on kumeran bus */
4587 return 0;
4588
4589 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4590 return 0;
4591
4592 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4593 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4594 reg >> GG82563_PAGE_SHIFT);
4595 } else {
4596 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4597 reg >> GG82563_PAGE_SHIFT);
4598 }
4599
4600 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4601 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4602 return (rv);
4603 }
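
/*
 * GG82563 register numbers encode a page in their high bits: the page
 * (reg >> GG82563_PAGE_SHIFT) is programmed through PAGE_SELECT first
 * (or the alternate select, for registers at or above
 * GG82563_MIN_ALT_REG), and the in-page register number
 * (reg & GG82563_MAX_REG_ADDRESS) then goes through the ordinary MDIC
 * helpers above.
 */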
4604
4605 /*
4606 * wm_gmii_i80003_writereg: [mii interface function]
4607 *
4608  * Write a PHY register on the Kumeran interface.
4609  * This could be handled by the PHY layer if we didn't have to lock the
4610  * resource ...
4611 */
4612 static void
4613 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
4614 {
4615 struct wm_softc *sc = device_private(self);
4616 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4617
4618 if (phy != 1) /* only one PHY on kumeran bus */
4619 return;
4620
4621 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4622 return;
4623
4624 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4625 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4626 reg >> GG82563_PAGE_SHIFT);
4627 } else {
4628 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4629 reg >> GG82563_PAGE_SHIFT);
4630 }
4631
4632 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4633 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4634 }
4635
4636 /*
4637 * wm_gmii_statchg: [mii interface function]
4638 *
4639 * Callback from MII layer when media changes.
4640 */
4641 static void
4642 wm_gmii_statchg(device_t self)
4643 {
4644 struct wm_softc *sc = device_private(self);
4645 struct mii_data *mii = &sc->sc_mii;
4646
4647 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4648 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4649 sc->sc_fcrtl &= ~FCRTL_XONE;
4650
4651 /*
4652 * Get flow control negotiation result.
4653 */
4654 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4655 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4656 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4657 mii->mii_media_active &= ~IFM_ETH_FMASK;
4658 }
4659
4660 if (sc->sc_flowflags & IFM_FLOW) {
4661 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4662 sc->sc_ctrl |= CTRL_TFCE;
4663 sc->sc_fcrtl |= FCRTL_XONE;
4664 }
4665 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4666 sc->sc_ctrl |= CTRL_RFCE;
4667 }
4668
4669 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4670 DPRINTF(WM_DEBUG_LINK,
4671 ("%s: LINK: statchg: FDX\n", device_xname(&sc->sc_dev)));
4672 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4673 } else {
4674 DPRINTF(WM_DEBUG_LINK,
4675 ("%s: LINK: statchg: HDX\n", device_xname(&sc->sc_dev)));
4676 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4677 }
4678
4679 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4680 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4681 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4682 : WMREG_FCRTL, sc->sc_fcrtl);
4683 if (sc->sc_type >= WM_T_80003) {
4684 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4685 case IFM_1000_T:
4686 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4687 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4688 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4689 break;
4690 default:
4691 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4692 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4693 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4694 break;
4695 }
4696 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4697 }
4698 }
4699
4700 /*
4701 * wm_kmrn_i80003_readreg:
4702 *
4703  * Read a Kumeran register.
4704 */
4705 static int
4706 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4707 {
4708 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4709 int rv;
4710
4711 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4712 return 0;
4713
4714 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4715 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4716 KUMCTRLSTA_REN);
4717 delay(2);
4718
4719 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4720 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4721 return (rv);
4722 }
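
/*
 * KUMCTRLSTA multiplexes address and data in a single register: the
 * offset goes into the KUMCTRLSTA_OFFSET field, REN requests a read,
 * and the result shows up in the low KUMCTRLSTA_MASK bits 2 us later.
 */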
4723
4724 /*
4725 * wm_kmrn_i80003_writereg:
4726 *
4727  * Write a Kumeran register.
4728 */
4729 static void
4730 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4731 {
4732 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4733
4734 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4735 return;
4736
4737 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4738 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4739 (val & KUMCTRLSTA_MASK));
4740 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4741 }
4742
4743 static int
4744 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4745 {
4746 uint32_t eecd = 0;
4747
4748 if (sc->sc_type == WM_T_82573) {
4749 eecd = CSR_READ(sc, WMREG_EECD);
4750
4751 /* Isolate bits 15 & 16 */
4752 eecd = ((eecd >> 15) & 0x03);
4753
4754 /* If both bits are set, device is Flash type */
4755 if (eecd == 0x03) {
4756 return 0;
4757 }
4758 }
4759 return 1;
4760 }
4761
4762 static int
4763 wm_get_swsm_semaphore(struct wm_softc *sc)
4764 {
4765 int32_t timeout;
4766 uint32_t swsm;
4767
4768 /* Get the FW semaphore. */
4769 timeout = 1000 + 1; /* XXX */
4770 while (timeout) {
4771 swsm = CSR_READ(sc, WMREG_SWSM);
4772 swsm |= SWSM_SWESMBI;
4773 CSR_WRITE(sc, WMREG_SWSM, swsm);
4774 		/* If we managed to set the bit, we got the semaphore. */
4775 swsm = CSR_READ(sc, WMREG_SWSM);
4776 if (swsm & SWSM_SWESMBI)
4777 break;
4778
4779 delay(50);
4780 timeout--;
4781 }
4782
4783 if (timeout == 0) {
4784 aprint_error_dev(&sc->sc_dev, "could not acquire EEPROM GNT\n");
4785 /* Release semaphores */
4786 wm_put_swsm_semaphore(sc);
4787 return 1;
4788 }
4789 return 0;
4790 }
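
/*
 * The SWSM semaphore is a set-and-verify bit: software writes
 * SWSM_SWESMBI and owns the semaphore only if the bit reads back set.
 * If firmware already holds it, the write apparently doesn't stick,
 * which is why the loop re-reads and retries.
 */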
4791
4792 static void
4793 wm_put_swsm_semaphore(struct wm_softc *sc)
4794 {
4795 uint32_t swsm;
4796
4797 swsm = CSR_READ(sc, WMREG_SWSM);
4798 swsm &= ~(SWSM_SWESMBI);
4799 CSR_WRITE(sc, WMREG_SWSM, swsm);
4800 }
4801
4802 static int
4803 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4804 {
4805 uint32_t swfw_sync;
4806 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4807 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
4808 	int timeout;
4809
4810 	for (timeout = 0; timeout < 200; timeout++) {
4811 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4812 if (wm_get_swsm_semaphore(sc))
4813 return 1;
4814 }
4815 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4816 if ((swfw_sync & (swmask | fwmask)) == 0) {
4817 swfw_sync |= swmask;
4818 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4819 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4820 wm_put_swsm_semaphore(sc);
4821 return 0;
4822 }
4823 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4824 wm_put_swsm_semaphore(sc);
4825 delay(5000);
4826 }
4827 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4828 device_xname(&sc->sc_dev), mask, swfw_sync);
4829 return 1;
4830 }
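
/*
 * SW_FW_SYNC carries a software claim bit and a firmware claim bit per
 * resource: the caller's mask is shifted by SWFW_SOFT_SHIFT for the
 * driver's side and by SWFW_FIRM_SHIFT for the firmware's, and the
 * resource is taken only when neither side already holds it.
 */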
4831
4832 static void
4833 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4834 {
4835 uint32_t swfw_sync;
4836
4837 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4838 while (wm_get_swsm_semaphore(sc) != 0)
4839 continue;
4840 }
4841 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4842 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4843 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4844 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4845 wm_put_swsm_semaphore(sc);
4846 }
4847
4848 static int
4849 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4850 {
4851 uint32_t ext_ctrl;
4852 	int timeout;
4853
4854 	for (timeout = 0; timeout < 200; timeout++) {
4855 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4856 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4857 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4858
4859 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4860 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4861 return 0;
4862 delay(5000);
4863 }
4864 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4865 device_xname(&sc->sc_dev), ext_ctrl);
4866 return 1;
4867 }
4868
4869 static void
4870 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4871 {
4872 uint32_t ext_ctrl;
4873 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4874 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4875 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4876 }
4877
4878 /******************************************************************************
4879  * Reads a 16-bit word or words from the EEPROM using the ICH8's flash access
4880  * register.
4881  *
4882  * sc - Struct containing variables accessed by shared code
4883  * offset - offset of the word in the EEPROM to read
4884  * words - number of words to read
4885  * data - word(s) read from the EEPROM
4886 *****************************************************************************/
4887 static int
4888 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4889 {
4890 int32_t error = 0;
4891 uint32_t flash_bank = 0;
4892 uint32_t act_offset = 0;
4893 uint32_t bank_offset = 0;
4894 uint16_t word = 0;
4895 uint16_t i = 0;
4896
4897 /* We need to know which is the valid flash bank. In the event
4898 * that we didn't allocate eeprom_shadow_ram, we may not be
4899 * managing flash_bank. So it cannot be trusted and needs
4900 * to be updated with each read.
4901 */
4902 /* Value of bit 22 corresponds to the flash bank we're on. */
4903 flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4904
4905 	/* If we're on bank 1, skip over bank 0 (the * 2 converts words to bytes). */
4906 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4907
4908 error = wm_get_swfwhw_semaphore(sc);
4909 if (error)
4910 return error;
4911
4912 for (i = 0; i < words; i++) {
4913 /* The NVM part needs a byte offset, hence * 2 */
4914 act_offset = bank_offset + ((offset + i) * 2);
4915 error = wm_read_ich8_word(sc, act_offset, &word);
4916 if (error)
4917 break;
4918 data[i] = word;
4919 }
4920
4921 wm_put_swfwhw_semaphore(sc);
4922 return error;
4923 }
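
/*
 * The offset arithmetic above converts word units into flash byte
 * addresses.  For example (with an illustrative bank size): if bank 1
 * is valid and sc_ich8_flash_bank_size is 0x800 words, word 0x10 is
 * read from byte offset 1 * 0x800 * 2 + 0x10 * 2 == 0x1020.
 */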
4924
4925 /******************************************************************************
4926 * This function does initial flash setup so that a new read/write/erase cycle
4927 * can be started.
4928 *
4929 * sc - The pointer to the hw structure
4930 ****************************************************************************/
4931 static int32_t
4932 wm_ich8_cycle_init(struct wm_softc *sc)
4933 {
4934 uint16_t hsfsts;
4935 int32_t error = 1;
4936 int32_t i = 0;
4937
4938 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4939
4940 	/* Check the Flash Descriptor Valid bit in HW status. */
4941 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4942 return error;
4943 }
4944
4945 	/* Clear FCERR in HW status by writing 1 */
4946 	/* Clear DAEL in HW status by writing 1 */
4947 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4948
4949 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4950
4951 	/* Either we should have a hardware SPI cycle-in-progress bit to
4952 	 * check against in order to start a new cycle, or the FDONE bit
4953 	 * should be set to 1 by a hardware reset, which could then be
4954 	 * used to tell whether a cycle is in progress or has completed.
4955 	 * We should also have some software semaphore mechanism to guard
4956 	 * FDONE or the cycle-in-progress bit so that accesses to those
4957 	 * bits by two threads are serialized, i.e. so that two threads
4958 	 * don't start a cycle at the same time. */
4959
4960 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4961 /* There is no cycle running at present, so we can start a cycle */
4962 /* Begin by setting Flash Cycle Done. */
4963 hsfsts |= HSFSTS_DONE;
4964 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4965 error = 0;
4966 } else {
4967 		/* Otherwise poll for some time so the current cycle has a
4968 		 * chance to end before giving up. */
4969 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4970 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4971 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4972 error = 0;
4973 break;
4974 }
4975 delay(1);
4976 }
4977 if (error == 0) {
4978 			/* Successfully waited for the previous cycle to
4979 			 * finish; now set the Flash Cycle Done bit. */
4980 hsfsts |= HSFSTS_DONE;
4981 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4982 }
4983 }
4984 return error;
4985 }
4986
4987 /******************************************************************************
4988 * This function starts a flash cycle and waits for its completion
4989 *
4990 * sc - The pointer to the hw structure
4991 ****************************************************************************/
4992 static int32_t
4993 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
4994 {
4995 uint16_t hsflctl;
4996 uint16_t hsfsts;
4997 int32_t error = 1;
4998 uint32_t i = 0;
4999
5000 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5001 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5002 hsflctl |= HSFCTL_GO;
5003 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5004
5005 /* wait till FDONE bit is set to 1 */
5006 do {
5007 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5008 if (hsfsts & HSFSTS_DONE)
5009 break;
5010 delay(1);
5011 i++;
5012 } while (i < timeout);
5013 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5014 error = 0;
5015 }
5016 return error;
5017 }
5018
5019 /******************************************************************************
5020 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5021 *
5022 * sc - The pointer to the hw structure
5023 * index - The index of the byte or word to read.
5024 * size - Size of data to read, 1=byte 2=word
5025 * data - Pointer to the word to store the value read.
5026 *****************************************************************************/
5027 static int32_t
5028 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5029 uint32_t size, uint16_t* data)
5030 {
5031 uint16_t hsfsts;
5032 uint16_t hsflctl;
5033 uint32_t flash_linear_address;
5034 uint32_t flash_data = 0;
5035 int32_t error = 1;
5036 int32_t count = 0;
5037
5038 	if (size < 1 || size > 2 || data == NULL ||
5039 index > ICH_FLASH_LINEAR_ADDR_MASK)
5040 return error;
5041
5042 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5043 sc->sc_ich8_flash_base;
5044
5045 do {
5046 delay(1);
5047 /* Steps */
5048 error = wm_ich8_cycle_init(sc);
5049 if (error)
5050 break;
5051
5052 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5053 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5054 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5055 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5056 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5057
5058 /* Write the last 24 bits of index into Flash Linear address field in
5059 * Flash Address */
5060 /* TODO: TBD maybe check the index against the size of flash */
5061
5062 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5063
5064 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5065
5066 		/* If FCERR is set, clear it and retry the whole sequence a
5067 		 * few more times; otherwise read the result (least
5068 		 * significant byte first) out of Flash Data0. */
5069 if (error == 0) {
5070 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5071 if (size == 1) {
5072 *data = (uint8_t)(flash_data & 0x000000FF);
5073 } else if (size == 2) {
5074 *data = (uint16_t)(flash_data & 0x0000FFFF);
5075 }
5076 break;
5077 } else {
5078 /* If we've gotten here, then things are probably completely hosed,
5079 * but if the error condition is detected, it won't hurt to give
5080 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5081 */
5082 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5083 if (hsfsts & HSFSTS_ERR) {
5084 /* Repeat for some time before giving up. */
5085 continue;
5086 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5087 break;
5088 }
5089 }
5090 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5091
5092 return error;
5093 }
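
/*
 * To summarize the read cycle above: prime the controller with
 * wm_ich8_cycle_init(), program the byte count and the READ cycle type
 * into HSFCTL, write the linear byte address into FADDR, set the GO
 * bit and poll for FDONE, then pull the result out of FDATA0.
 */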
5094
5095 #if 0
5096 /******************************************************************************
5097 * Reads a single byte from the NVM using the ICH8 flash access registers.
5098 *
5099 * sc - pointer to wm_hw structure
5100 * index - The index of the byte to read.
5101 * data - Pointer to a byte to store the value read.
5102 *****************************************************************************/
5103 static int32_t
5104 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5105 {
5106 int32_t status;
5107 uint16_t word = 0;
5108
5109 status = wm_read_ich8_data(sc, index, 1, &word);
5110 if (status == 0) {
5111 *data = (uint8_t)word;
5112 }
5113
5114 return status;
5115 }
5116 #endif
5117
5118 /******************************************************************************
5119 * Reads a word from the NVM using the ICH8 flash access registers.
5120 *
5121 * sc - pointer to wm_hw structure
5122 * index - The starting byte index of the word to read.
5123 * data - Pointer to a word to store the value read.
5124 *****************************************************************************/
5125 static int32_t
5126 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5127 {
5128 int32_t status;
5129
5130 status = wm_read_ich8_data(sc, index, 2, data);
5131 return status;
5132 }
5133