/*	$NetBSD: if_wm.c,v 1.144 2007/08/28 01:10:34 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

  1. Redistributions of source code must retain the above copyright notice,
     this list of conditions and the following disclaimer.

  2. Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.

  3. Neither the name of the Intel Corporation nor the names of its
     contributors may be used to endorse or promote products derived from
     this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.144 2007/08/28 01:10:34 msaitoh Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
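
/*
 * Usage sketch (illustrative only, not a call taken verbatim from this
 * file): because DPRINTF() expands to a bare printf statement, the
 * printf-style arguments must carry their own parentheses, e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link up\n", sc->sc_dev.dv_xname));
 */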

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
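
/*
 * Illustrative example of the power-of-two index arithmetic above:
 * with WM_NTXDESC(sc) == 4096, WM_NTXDESC_MASK(sc) == 4095, so
 * WM_NEXTTX(sc, 4095) == (4095 + 1) & 4095 == 0 and the index wraps
 * back to the start of the ring without a division.  This is why
 * sc_ntxdesc and sc_txnum must be powers of two.
 */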

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
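
/*
 * Worked example for the sizing comment above (assuming a 9000-byte
 * jumbo MTU plus the 14-byte Ethernet header): a 9014-byte frame in
 * 2048-byte clusters occupies ceil(9014 / 2048) == 5 Rx buffers, so
 * a 256-entry ring holds about 50 such frames, as noted.
 */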

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
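
/*
 * A minimal sketch of how the Rx chaining macros cooperate (not a code
 * path taken verbatim from this driver): sc_rxtailp always points at
 * the m_next slot where the next fragment belongs, so appends are O(1):
 *
 *	WM_RXCHAIN_RESET(sc);		(sc_rxhead is now NULL)
 *	WM_RXCHAIN_LINK(sc, m1);	(sc_rxhead == m1)
 *	WM_RXCHAIN_LINK(sc, m2);	(m1->m_next == m2)
 */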

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
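
/*
 * Worked example for WM_CDTXSYNC() above, assuming a 4096-entry ring:
 * syncing 4 descriptors starting at index 4094 first syncs the 2
 * descriptors at the end of the ring (4094 and 4095), then wraps and
 * syncs the remaining 2 (0 and 1) with the second bus_dmamap_sync()
 * call.
 */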

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
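
/*
 * Why the 2-byte "scoot" in WM_INIT_RXDESC() helps: the Ethernet header
 * is 14 bytes, so starting the frame 2 bytes into the buffer places the
 * IP header at offset 16, a 4-byte aligned address.  On strict-alignment
 * CPUs this avoids unaligned-access faults when the stack reads 32-bit
 * fields of struct ip.
 */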

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
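
/*
 * Illustrative only: on a platform with a 64-bit bus_addr_t,
 * wm_set_dma_addr(&rxd->wrx_addr, 0x123456789abcULL) stores
 * htole32(0x56789abc) in wa_low and htole32(0x1234) in wa_high;
 * on a 32-bit platform wa_high is simply forced to zero.
 */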

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
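		/*
		 * A worked example of the GFPREG decoding above (assuming
		 * 4 KB flash sectors, as ICH_FLASH_SECTOR_SIZE suggests):
		 * with a base field of 1 and a limit field of 31, the NVM
		 * region spans 31 + 1 - 1 == 31 sectors == 126976 bytes,
		 * split across two banks, so sc_ich8_flash_bank_size ends
		 * up as 126976 / (2 * sizeof(uint16_t)) == 31744 16-bit
		 * words per bank.
		 */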
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
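
	/*
	 * The EEPROM stores the MAC address as three little-endian 16-bit
	 * words.  For example (illustrative values only), myea[] =
	 * { 0xb300, 0x1202, 0x5634 } unpacks to 00:b3:02:12:34:56.
	 */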

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
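
	/*
	 * Example (illustrative address): if the EEPROM of a dual-port
	 * i82546 holds 00:02:b3:aa:bb:c0, the second PCI function's port
	 * reports 00:02:b3:aa:bb:c1 after the XOR of the low bit above.
	 */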

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
1518 if (sc->sc_type >= WM_T_82543) {
1519 ifp->if_capabilities |=
1520 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1521 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1522 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1523 IFCAP_CSUM_TCPv6_Tx |
1524 IFCAP_CSUM_UDPv6_Tx;
1525 }
1526
1527 /*
1528 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1529 *
1530 * 82541GI (8086:1076) ... no
1531 * 82572EI (8086:10b9) ... yes
1532 */
1533 if (sc->sc_type >= WM_T_82571) {
1534 ifp->if_capabilities |=
1535 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
1536 }
1537
1538 /*
1539 * If we're a i82544 or greater (except i82547), we can do
1540 * TCP segmentation offload.
1541 */
1542 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
1543 ifp->if_capabilities |= IFCAP_TSOv4;
1544 }
1545
1546 if (sc->sc_type >= WM_T_82571) {
1547 ifp->if_capabilities |= IFCAP_TSOv6;
1548 }
1549
1550 /*
1551 * Attach the interface.
1552 */
1553 if_attach(ifp);
1554 ether_ifattach(ifp, enaddr);
1555 #if NRND > 0
1556 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1557 RND_TYPE_NET, 0);
1558 #endif
1559
1560 #ifdef WM_EVENT_COUNTERS
1561 /* Attach event counters. */
1562 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1563 NULL, sc->sc_dev.dv_xname, "txsstall");
1564 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1565 NULL, sc->sc_dev.dv_xname, "txdstall");
1566 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1567 NULL, sc->sc_dev.dv_xname, "txfifo_stall");
1568 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1569 NULL, sc->sc_dev.dv_xname, "txdw");
1570 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1571 NULL, sc->sc_dev.dv_xname, "txqe");
1572 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1573 NULL, sc->sc_dev.dv_xname, "rxintr");
1574 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1575 NULL, sc->sc_dev.dv_xname, "linkintr");
1576
1577 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1578 NULL, sc->sc_dev.dv_xname, "rxipsum");
1579 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1580 NULL, sc->sc_dev.dv_xname, "rxtusum");
1581 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1582 NULL, sc->sc_dev.dv_xname, "txipsum");
1583 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1584 NULL, sc->sc_dev.dv_xname, "txtusum");
1585 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1586 NULL, sc->sc_dev.dv_xname, "txtusum6");
1587
1588 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1589 NULL, sc->sc_dev.dv_xname, "txtso");
1590 evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
1591 NULL, sc->sc_dev.dv_xname, "txtso6");
1592 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1593 NULL, sc->sc_dev.dv_xname, "txtsopain");
1594
1595 for (i = 0; i < WM_NTXSEGS; i++) {
1596 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1597 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1598 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1599 }
1600
1601 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1602 NULL, sc->sc_dev.dv_xname, "txdrop");
1603
1604 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1605 NULL, sc->sc_dev.dv_xname, "tu");
1606
1607 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1608 NULL, sc->sc_dev.dv_xname, "tx_xoff");
1609 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1610 NULL, sc->sc_dev.dv_xname, "tx_xon");
1611 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1612 NULL, sc->sc_dev.dv_xname, "rx_xoff");
1613 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1614 NULL, sc->sc_dev.dv_xname, "rx_xon");
1615 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1616 NULL, sc->sc_dev.dv_xname, "rx_macctl");
1617 #endif /* WM_EVENT_COUNTERS */
1618
1619 /*
1620 * Make sure the interface is shutdown during reboot.
1621 */
1622 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1623 if (sc->sc_sdhook == NULL)
1624 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1625 sc->sc_dev.dv_xname);
1626
1627 sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
1628 wm_powerhook, sc);
1629 if (sc->sc_powerhook == NULL)
1630 aprint_error("%s: can't establish powerhook\n",
1631 sc->sc_dev.dv_xname);
1632 return;
1633
1634 /*
1635 * Free any resources we've allocated during the failed attach
1636 * attempt. Do this in reverse order and fall through.
1637 */
1638 fail_5:
1639 for (i = 0; i < WM_NRXDESC; i++) {
1640 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1641 bus_dmamap_destroy(sc->sc_dmat,
1642 sc->sc_rxsoft[i].rxs_dmamap);
1643 }
1644 fail_4:
1645 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1646 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1647 bus_dmamap_destroy(sc->sc_dmat,
1648 sc->sc_txsoft[i].txs_dmamap);
1649 }
1650 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1651 fail_3:
1652 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1653 fail_2:
1654 bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
1655 cdata_size);
1656 fail_1:
1657 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1658 fail_0:
1659 return;
1660 }
1661
1662 /*
1663 * wm_shutdown:
1664 *
1665 * Make sure the interface is stopped at reboot time.
1666 */
1667 static void
1668 wm_shutdown(void *arg)
1669 {
1670 struct wm_softc *sc = arg;
1671
1672 wm_stop(&sc->sc_ethercom.ec_if, 1);
1673 }
1674
1675 static void
1676 wm_powerhook(int why, void *arg)
1677 {
1678 struct wm_softc *sc = arg;
1679 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1680 pci_chipset_tag_t pc = sc->sc_pc;
1681 pcitag_t tag = sc->sc_pcitag;
1682
1683 switch (why) {
1684 case PWR_SOFTSUSPEND:
1685 wm_shutdown(sc);
1686 break;
1687 case PWR_SOFTRESUME:
1688 ifp->if_flags &= ~IFF_RUNNING;
1689 wm_init(ifp);
1690 if (ifp->if_flags & IFF_RUNNING)
1691 wm_start(ifp);
1692 break;
1693 case PWR_SUSPEND:
1694 pci_conf_capture(pc, tag, &sc->sc_pciconf);
1695 break;
1696 case PWR_RESUME:
1697 pci_conf_restore(pc, tag, &sc->sc_pciconf);
1698 break;
1699 }
1700
1701 return;
1702 }
1703
1704 /*
1705 * wm_tx_offload:
1706 *
1707 * Set up TCP/IP checksumming parameters for the
1708 * specified packet.
1709 */
1710 static int
1711 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1712 uint8_t *fieldsp)
1713 {
1714 struct mbuf *m0 = txs->txs_mbuf;
1715 struct livengood_tcpip_ctxdesc *t;
1716 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1717 uint32_t ipcse;
1718 struct ether_header *eh;
1719 int offset, iphl;
1720 uint8_t fields;
1721
1722 /*
1723 * XXX It would be nice if the mbuf pkthdr had offset
1724 * fields for the protocol headers.
1725 */
1726
1727 eh = mtod(m0, struct ether_header *);
1728 switch (htons(eh->ether_type)) {
1729 case ETHERTYPE_IP:
1730 case ETHERTYPE_IPV6:
1731 offset = ETHER_HDR_LEN;
1732 break;
1733
1734 case ETHERTYPE_VLAN:
1735 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1736 break;
1737
1738 default:
1739 /*
1740 * Don't support this protocol or encapsulation.
1741 */
1742 *fieldsp = 0;
1743 *cmdp = 0;
1744 return (0);
1745 }
1746
1747 if ((m0->m_pkthdr.csum_flags &
1748 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1749 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1750 } else {
1751 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1752 }
1753 ipcse = offset + iphl - 1;
1754
1755 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1756 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1757 seg = 0;
1758 fields = 0;
1759
1760 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1761 int hlen = offset + iphl;
1762 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1763
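		/*
		 * TSO needs a template header: we zero the IP-level
		 * length field and seed th_sum with the pseudo-header
		 * checksum (addresses and protocol only, no length) so
		 * the chip can fill in per-segment lengths and
		 * checksums on its own.
		 */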
1764 if (__predict_false(m0->m_len <
1765 (hlen + sizeof(struct tcphdr)))) {
1766 /*
1767 * TCP/IP headers are not in the first mbuf; we need
1768 * to do this the slow and painful way. Let's just
1769 * hope this doesn't happen very often.
1770 */
1771 struct tcphdr th;
1772
1773 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1774
1775 m_copydata(m0, hlen, sizeof(th), &th);
1776 if (v4) {
1777 struct ip ip;
1778
1779 m_copydata(m0, offset, sizeof(ip), &ip);
1780 ip.ip_len = 0;
1781 m_copyback(m0,
1782 offset + offsetof(struct ip, ip_len),
1783 sizeof(ip.ip_len), &ip.ip_len);
1784 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1785 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1786 } else {
1787 struct ip6_hdr ip6;
1788
1789 m_copydata(m0, offset, sizeof(ip6), &ip6);
1790 ip6.ip6_plen = 0;
1791 m_copyback(m0,
1792 offset + offsetof(struct ip6_hdr, ip6_plen),
1793 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1794 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1795 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1796 }
1797 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1798 sizeof(th.th_sum), &th.th_sum);
1799
1800 hlen += th.th_off << 2;
1801 } else {
1802 /*
1803 * TCP/IP headers are in the first mbuf; we can do
1804 * this the easy way.
1805 */
1806 struct tcphdr *th;
1807
1808 if (v4) {
1809 struct ip *ip =
1810 (void *)(mtod(m0, char *) + offset);
1811 th = (void *)(mtod(m0, char *) + hlen);
1812
1813 ip->ip_len = 0;
1814 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1815 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1816 } else {
1817 struct ip6_hdr *ip6 =
1818 (void *)(mtod(m0, char *) + offset);
1819 th = (void *)(mtod(m0, char *) + hlen);
1820
1821 ip6->ip6_plen = 0;
1822 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1823 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1824 }
1825 hlen += th->th_off << 2;
1826 }
1827
1828 if (v4) {
1829 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1830 cmdlen |= WTX_TCPIP_CMD_IP;
1831 } else {
1832 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
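			/*
			 * v6 has no IP-header checksum, so the IP
			 * checksum ending offset is left at zero.
			 */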
1833 ipcse = 0;
1834 }
1835 cmd |= WTX_TCPIP_CMD_TSE;
1836 cmdlen |= WTX_TCPIP_CMD_TSE |
1837 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1838 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1839 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1840 }
1841
1842 /*
1843 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1844 * offload feature, if we load the context descriptor, we
1845 * MUST provide valid values for IPCSS and TUCSS fields.
1846 */
1847
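	/*
	 * Worked example (IPv4, no VLAN): offset = 14, so IPCSS = 14,
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, and with a
	 * 20-byte header IPCSE = 14 + 20 - 1 = 33.
	 */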
1848 ipcs = WTX_TCPIP_IPCSS(offset) |
1849 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1850 WTX_TCPIP_IPCSE(ipcse);
1851 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1852 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1853 fields |= WTX_IXSM;
1854 }
1855
1856 offset += iphl;
1857
1858 if (m0->m_pkthdr.csum_flags &
1859 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1860 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1861 fields |= WTX_TXSM;
1862 tucs = WTX_TCPIP_TUCSS(offset) |
1863 WTX_TCPIP_TUCSO(offset +
1864 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1865 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1866 } else if ((m0->m_pkthdr.csum_flags &
1867 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1868 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1869 fields |= WTX_TXSM;
1870 tucs = WTX_TCPIP_TUCSS(offset) |
1871 WTX_TCPIP_TUCSO(offset +
1872 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1873 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1874 } else {
1875 /* Just initialize it to a valid TCP context. */
1876 tucs = WTX_TCPIP_TUCSS(offset) |
1877 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1878 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1879 }
1880
1881 /* Fill in the context descriptor. */
1882 t = (struct livengood_tcpip_ctxdesc *)
1883 &sc->sc_txdescs[sc->sc_txnext];
1884 t->tcpip_ipcs = htole32(ipcs);
1885 t->tcpip_tucs = htole32(tucs);
1886 t->tcpip_cmdlen = htole32(cmdlen);
1887 t->tcpip_seg = htole32(seg);
1888 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1889
1890 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1891 txs->txs_ndesc++;
1892
1893 *cmdp = cmd;
1894 *fieldsp = fields;
1895
1896 return (0);
1897 }
1898
1899 static void
1900 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1901 {
1902 struct mbuf *m;
1903 int i;
1904
1905 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
1906 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1907 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1908 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
1909 m->m_data, m->m_len, m->m_flags);
1910 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
1911 i, i == 1 ? "" : "s");
1912 }
1913
1914 /*
1915 * wm_82547_txfifo_stall:
1916 *
1917 * Callout used to wait for the 82547 Tx FIFO to drain,
1918 * reset the FIFO pointers, and restart packet transmission.
1919 */
1920 static void
1921 wm_82547_txfifo_stall(void *arg)
1922 {
1923 struct wm_softc *sc = arg;
1924 int s;
1925
1926 s = splnet();
1927
1928 if (sc->sc_txfifo_stall) {
1929 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1930 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1931 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1932 /*
1933 * Packets have drained. Stop transmitter, reset
1934 * FIFO pointers, restart transmitter, and kick
1935 * the packet queue.
1936 */
1937 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1938 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1939 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1940 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1941 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1942 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1943 CSR_WRITE(sc, WMREG_TCTL, tctl);
1944 CSR_WRITE_FLUSH(sc);
1945
1946 sc->sc_txfifo_head = 0;
1947 sc->sc_txfifo_stall = 0;
1948 wm_start(&sc->sc_ethercom.ec_if);
1949 } else {
1950 /*
1951 * Still waiting for packets to drain; try again in
1952 * another tick.
1953 */
1954 callout_schedule(&sc->sc_txfifo_ch, 1);
1955 }
1956 }
1957
1958 splx(s);
1959 }
1960
1961 /*
1962 * wm_82547_txfifo_bugchk:
1963 *
1964 * Check for bug condition in the 82547 Tx FIFO. We need to
1965 * prevent enqueueing a packet that would wrap around the end
1966 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
1967 *
1968 * We do this by checking the amount of space before the end
1969 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1970 * the Tx FIFO, wait for all remaining packets to drain, reset
1971 * the internal FIFO pointers to the beginning, and restart
1972 * transmission on the interface.
1973 */
1974 #define WM_FIFO_HDR 0x10
1975 #define WM_82547_PAD_LEN 0x3e0
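/*
 * Example: a 1514-byte frame costs roundup(1514 + WM_FIFO_HDR,
 * WM_FIFO_HDR) = 1536 FIFO bytes, so we stall once no more than
 * 1536 - WM_82547_PAD_LEN = 544 bytes remain before the end of
 * the FIFO.
 */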
1976 static int
1977 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1978 {
1979 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1980 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1981
1982 /* Just return if already stalled. */
1983 if (sc->sc_txfifo_stall)
1984 return (1);
1985
1986 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1987 /* Stall only occurs in half-duplex mode. */
1988 goto send_packet;
1989 }
1990
1991 if (len >= WM_82547_PAD_LEN + space) {
1992 sc->sc_txfifo_stall = 1;
1993 callout_schedule(&sc->sc_txfifo_ch, 1);
1994 return (1);
1995 }
1996
1997 send_packet:
1998 sc->sc_txfifo_head += len;
1999 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2000 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2001
2002 return (0);
2003 }
2004
2005 /*
2006 * wm_start: [ifnet interface function]
2007 *
2008 * Start packet transmission on the interface.
2009 */
2010 static void
2011 wm_start(struct ifnet *ifp)
2012 {
2013 struct wm_softc *sc = ifp->if_softc;
2014 struct mbuf *m0;
2015 #if 0 /* XXXJRT */
2016 struct m_tag *mtag;
2017 #endif
2018 struct wm_txsoft *txs;
2019 bus_dmamap_t dmamap;
2020 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2021 bus_addr_t curaddr;
2022 bus_size_t seglen, curlen;
2023 uint32_t cksumcmd;
2024 uint8_t cksumfields;
2025
2026 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2027 return;
2028
2029 /*
2030 * Remember the previous number of free descriptors.
2031 */
2032 ofree = sc->sc_txfree;
2033
2034 /*
2035 * Loop through the send queue, setting up transmit descriptors
2036 * until we drain the queue, or use up all available transmit
2037 * descriptors.
2038 */
2039 for (;;) {
2040 /* Grab a packet off the queue. */
2041 IFQ_POLL(&ifp->if_snd, m0);
2042 if (m0 == NULL)
2043 break;
2044
2045 DPRINTF(WM_DEBUG_TX,
2046 ("%s: TX: have packet to transmit: %p\n",
2047 sc->sc_dev.dv_xname, m0));
2048
2049 /* Get a work queue entry. */
2050 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2051 wm_txintr(sc);
2052 if (sc->sc_txsfree == 0) {
2053 DPRINTF(WM_DEBUG_TX,
2054 ("%s: TX: no free job descriptors\n",
2055 sc->sc_dev.dv_xname));
2056 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2057 break;
2058 }
2059 }
2060
2061 txs = &sc->sc_txsoft[sc->sc_txsnext];
2062 dmamap = txs->txs_dmamap;
2063
2064 use_tso = (m0->m_pkthdr.csum_flags &
2065 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2066
2067 /*
2068 * So says the Linux driver:
2069 * The controller does a simple calculation to make sure
2070 * there is enough room in the FIFO before initiating the
2071 * DMA for each buffer. The calc is:
2072 * 4 = ceil(buffer len / MSS)
2073 * To make sure we don't overrun the FIFO, adjust the max
2074 * buffer len if the MSS drops.
2075 */
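		/*
		 * E.g. an MSS of 1448 would cap each DMA segment at
		 * 1448 << 2 = 5792 bytes (assuming that is below the
		 * WTX_MAX_LEN descriptor limit).
		 */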
2076 dmamap->dm_maxsegsz =
2077 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2078 ? m0->m_pkthdr.segsz << 2
2079 : WTX_MAX_LEN;
2080
2081 /*
2082 * Load the DMA map. If this fails, the packet either
2083 * didn't fit in the allotted number of segments, or we
2084 * were short on resources. For the too-many-segments
2085 * case, we simply report an error and drop the packet,
2086 * since we can't sanely copy a jumbo packet to a single
2087 * buffer.
2088 */
2089 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2090 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2091 if (error) {
2092 if (error == EFBIG) {
2093 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2094 log(LOG_ERR, "%s: Tx packet consumes too many "
2095 "DMA segments, dropping...\n",
2096 sc->sc_dev.dv_xname);
2097 IFQ_DEQUEUE(&ifp->if_snd, m0);
2098 wm_dump_mbuf_chain(sc, m0);
2099 m_freem(m0);
2100 continue;
2101 }
2102 /*
2103 * Short on resources, just stop for now.
2104 */
2105 DPRINTF(WM_DEBUG_TX,
2106 ("%s: TX: dmamap load failed: %d\n",
2107 sc->sc_dev.dv_xname, error));
2108 break;
2109 }
2110
2111 segs_needed = dmamap->dm_nsegs;
2112 if (use_tso) {
2113 /* For sentinel descriptor; see below. */
2114 segs_needed++;
2115 }
2116
2117 /*
2118 * Ensure we have enough descriptors free to describe
2119 * the packet. Note, we always reserve one descriptor
2120 * at the end of the ring due to the semantics of the
2121 * TDT register, plus one more in the event we need
2122 * to load offload context.
2123 */
2124 if (segs_needed > sc->sc_txfree - 2) {
2125 /*
2126 * Not enough free descriptors to transmit this
2127 * packet. We haven't committed anything yet,
2128 * so just unload the DMA map, put the packet
2129 			 * back on the queue, and punt.  Notify the upper
2130 * layer that there are no more slots left.
2131 */
2132 DPRINTF(WM_DEBUG_TX,
2133 ("%s: TX: need %d (%d) descriptors, have %d\n",
2134 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
2135 			    sc->sc_txfree - 2));
2136 ifp->if_flags |= IFF_OACTIVE;
2137 bus_dmamap_unload(sc->sc_dmat, dmamap);
2138 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2139 break;
2140 }
2141
2142 /*
2143 * Check for 82547 Tx FIFO bug. We need to do this
2144 * once we know we can transmit the packet, since we
2145 * do some internal FIFO space accounting here.
2146 */
2147 if (sc->sc_type == WM_T_82547 &&
2148 wm_82547_txfifo_bugchk(sc, m0)) {
2149 DPRINTF(WM_DEBUG_TX,
2150 ("%s: TX: 82547 Tx FIFO bug detected\n",
2151 sc->sc_dev.dv_xname));
2152 ifp->if_flags |= IFF_OACTIVE;
2153 bus_dmamap_unload(sc->sc_dmat, dmamap);
2154 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2155 break;
2156 }
2157
2158 IFQ_DEQUEUE(&ifp->if_snd, m0);
2159
2160 /*
2161 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2162 */
2163
2164 DPRINTF(WM_DEBUG_TX,
2165 ("%s: TX: packet has %d (%d) DMA segments\n",
2166 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
2167
2168 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2169
2170 /*
2171 * Store a pointer to the packet so that we can free it
2172 * later.
2173 *
2174 * Initially, we consider the number of descriptors the
2175 * packet uses the number of DMA segments. This may be
2176 * incremented by 1 if we do checksum offload (a descriptor
2177 * is used to set the checksum context).
2178 */
2179 txs->txs_mbuf = m0;
2180 txs->txs_firstdesc = sc->sc_txnext;
2181 txs->txs_ndesc = segs_needed;
2182
2183 /* Set up offload parameters for this packet. */
2184 if (m0->m_pkthdr.csum_flags &
2185 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2186 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2187 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2188 if (wm_tx_offload(sc, txs, &cksumcmd,
2189 &cksumfields) != 0) {
2190 /* Error message already displayed. */
2191 bus_dmamap_unload(sc->sc_dmat, dmamap);
2192 continue;
2193 }
2194 } else {
2195 cksumcmd = 0;
2196 cksumfields = 0;
2197 }
2198
2199 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2200
2201 /* Sync the DMA map. */
2202 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2203 BUS_DMASYNC_PREWRITE);
2204
2205 /*
2206 * Initialize the transmit descriptor.
2207 */
2208 for (nexttx = sc->sc_txnext, seg = 0;
2209 seg < dmamap->dm_nsegs; seg++) {
2210 for (seglen = dmamap->dm_segs[seg].ds_len,
2211 curaddr = dmamap->dm_segs[seg].ds_addr;
2212 seglen != 0;
2213 curaddr += curlen, seglen -= curlen,
2214 nexttx = WM_NEXTTX(sc, nexttx)) {
2215 curlen = seglen;
2216
2217 /*
2218 * So says the Linux driver:
2219 * Work around for premature descriptor
2220 * write-backs in TSO mode. Append a
2221 * 4-byte sentinel descriptor.
2222 */
2223 if (use_tso &&
2224 seg == dmamap->dm_nsegs - 1 &&
2225 curlen > 8)
2226 curlen -= 4;
2227
2228 wm_set_dma_addr(
2229 &sc->sc_txdescs[nexttx].wtx_addr,
2230 curaddr);
2231 sc->sc_txdescs[nexttx].wtx_cmdlen =
2232 htole32(cksumcmd | curlen);
2233 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2234 0;
2235 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2236 cksumfields;
2237 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2238 lasttx = nexttx;
2239
2240 DPRINTF(WM_DEBUG_TX,
2241 ("%s: TX: desc %d: low 0x%08lx, "
2242 "len 0x%04x\n",
2243 sc->sc_dev.dv_xname, nexttx,
2244 curaddr & 0xffffffffUL, (unsigned)curlen));
2245 }
2246 }
2247
2248 KASSERT(lasttx != -1);
2249
2250 /*
2251 * Set up the command byte on the last descriptor of
2252 * the packet. If we're in the interrupt delay window,
2253 * delay the interrupt.
2254 */
2255 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2256 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2257
2258 #if 0 /* XXXJRT */
2259 /*
2260 * If VLANs are enabled and the packet has a VLAN tag, set
2261 * up the descriptor to encapsulate the packet for us.
2262 *
2263 * This is only valid on the last descriptor of the packet.
2264 */
2265 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2266 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2267 htole32(WTX_CMD_VLE);
2268 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2269 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2270 }
2271 #endif /* XXXJRT */
2272
2273 txs->txs_lastdesc = lasttx;
2274
2275 DPRINTF(WM_DEBUG_TX,
2276 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
2277 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2278
2279 /* Sync the descriptors we're using. */
2280 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2281 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2282
2283 /* Give the packet to the chip. */
2284 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2285
2286 DPRINTF(WM_DEBUG_TX,
2287 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
2288
2289 DPRINTF(WM_DEBUG_TX,
2290 ("%s: TX: finished transmitting packet, job %d\n",
2291 sc->sc_dev.dv_xname, sc->sc_txsnext));
2292
2293 /* Advance the tx pointer. */
2294 sc->sc_txfree -= txs->txs_ndesc;
2295 sc->sc_txnext = nexttx;
2296
2297 sc->sc_txsfree--;
2298 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2299
2300 #if NBPFILTER > 0
2301 /* Pass the packet to any BPF listeners. */
2302 if (ifp->if_bpf)
2303 bpf_mtap(ifp->if_bpf, m0);
2304 #endif /* NBPFILTER > 0 */
2305 }
2306
2307 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2308 /* No more slots; notify upper layer. */
2309 ifp->if_flags |= IFF_OACTIVE;
2310 }
2311
2312 if (sc->sc_txfree != ofree) {
2313 /* Set a watchdog timer in case the chip flakes out. */
2314 ifp->if_timer = 5;
2315 }
2316 }
2317
2318 /*
2319 * wm_watchdog: [ifnet interface function]
2320 *
2321 * Watchdog timer handler.
2322 */
2323 static void
2324 wm_watchdog(struct ifnet *ifp)
2325 {
2326 struct wm_softc *sc = ifp->if_softc;
2327
2328 /*
2329 * Since we're using delayed interrupts, sweep up
2330 * before we report an error.
2331 */
2332 wm_txintr(sc);
2333
2334 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2335 log(LOG_ERR,
2336 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2337 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
2338 sc->sc_txnext);
2339 ifp->if_oerrors++;
2340
2341 /* Reset the interface. */
2342 (void) wm_init(ifp);
2343 }
2344
2345 /* Try to get more packets going. */
2346 wm_start(ifp);
2347 }
2348
2349 /*
2350 * wm_ioctl: [ifnet interface function]
2351 *
2352 * Handle control requests from the operator.
2353 */
2354 static int
2355 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2356 {
2357 struct wm_softc *sc = ifp->if_softc;
2358 struct ifreq *ifr = (struct ifreq *) data;
2359 int s, error;
2360
2361 s = splnet();
2362
2363 switch (cmd) {
2364 case SIOCSIFMEDIA:
2365 case SIOCGIFMEDIA:
2366 /* Flow control requires full-duplex mode. */
2367 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2368 (ifr->ifr_media & IFM_FDX) == 0)
2369 ifr->ifr_media &= ~IFM_ETH_FMASK;
2370 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2371 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2372 /* We can do both TXPAUSE and RXPAUSE. */
2373 ifr->ifr_media |=
2374 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2375 }
2376 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2377 }
2378 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2379 break;
2380 default:
2381 error = ether_ioctl(ifp, cmd, data);
2382 if (error == ENETRESET) {
2383 /*
2384 * Multicast list has changed; set the hardware filter
2385 * accordingly.
2386 */
2387 if (ifp->if_flags & IFF_RUNNING)
2388 wm_set_filter(sc);
2389 error = 0;
2390 }
2391 break;
2392 }
2393
2394 /* Try to get more packets going. */
2395 wm_start(ifp);
2396
2397 splx(s);
2398 return (error);
2399 }
2400
2401 /*
2402 * wm_intr:
2403 *
2404 * Interrupt service routine.
2405 */
2406 static int
2407 wm_intr(void *arg)
2408 {
2409 struct wm_softc *sc = arg;
2410 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2411 uint32_t icr;
2412 int handled = 0;
2413
2414 while (1 /* CONSTCOND */) {
2415 icr = CSR_READ(sc, WMREG_ICR);
2416 if ((icr & sc->sc_icr) == 0)
2417 break;
2418 #if 0 /*NRND > 0*/
2419 if (RND_ENABLED(&sc->rnd_source))
2420 rnd_add_uint32(&sc->rnd_source, icr);
2421 #endif
2422
2423 handled = 1;
2424
2425 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2426 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2427 DPRINTF(WM_DEBUG_RX,
2428 ("%s: RX: got Rx intr 0x%08x\n",
2429 sc->sc_dev.dv_xname,
2430 icr & (ICR_RXDMT0|ICR_RXT0)));
2431 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2432 }
2433 #endif
2434 wm_rxintr(sc);
2435
2436 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2437 if (icr & ICR_TXDW) {
2438 DPRINTF(WM_DEBUG_TX,
2439 ("%s: TX: got TXDW interrupt\n",
2440 sc->sc_dev.dv_xname));
2441 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2442 }
2443 #endif
2444 wm_txintr(sc);
2445
2446 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2447 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2448 wm_linkintr(sc, icr);
2449 }
2450
2451 if (icr & ICR_RXO) {
2452 ifp->if_ierrors++;
2453 #if defined(WM_DEBUG)
2454 log(LOG_WARNING, "%s: Receive overrun\n",
2455 sc->sc_dev.dv_xname);
2456 #endif /* defined(WM_DEBUG) */
2457 }
2458 }
2459
2460 if (handled) {
2461 /* Try to get more packets going. */
2462 wm_start(ifp);
2463 }
2464
2465 return (handled);
2466 }
2467
2468 /*
2469 * wm_txintr:
2470 *
2471 * Helper; handle transmit interrupts.
2472 */
2473 static void
2474 wm_txintr(struct wm_softc *sc)
2475 {
2476 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2477 struct wm_txsoft *txs;
2478 uint8_t status;
2479 int i;
2480
2481 ifp->if_flags &= ~IFF_OACTIVE;
2482
2483 /*
2484 * Go through the Tx list and free mbufs for those
2485 * frames which have been transmitted.
2486 */
2487 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2488 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2489 txs = &sc->sc_txsoft[i];
2490
2491 DPRINTF(WM_DEBUG_TX,
2492 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
2493
2494 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2495 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2496
2497 status =
2498 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2499 if ((status & WTX_ST_DD) == 0) {
2500 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2501 BUS_DMASYNC_PREREAD);
2502 break;
2503 }
2504
2505 DPRINTF(WM_DEBUG_TX,
2506 ("%s: TX: job %d done: descs %d..%d\n",
2507 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
2508 txs->txs_lastdesc));
2509
2510 /*
2511 * XXX We should probably be using the statistics
2512 * XXX registers, but I don't know if they exist
2513 * XXX on chips before the i82544.
2514 */
2515
2516 #ifdef WM_EVENT_COUNTERS
2517 if (status & WTX_ST_TU)
2518 WM_EVCNT_INCR(&sc->sc_ev_tu);
2519 #endif /* WM_EVENT_COUNTERS */
2520
2521 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2522 ifp->if_oerrors++;
2523 if (status & WTX_ST_LC)
2524 log(LOG_WARNING, "%s: late collision\n",
2525 sc->sc_dev.dv_xname);
2526 else if (status & WTX_ST_EC) {
2527 ifp->if_collisions += 16;
2528 log(LOG_WARNING, "%s: excessive collisions\n",
2529 sc->sc_dev.dv_xname);
2530 }
2531 } else
2532 ifp->if_opackets++;
2533
2534 sc->sc_txfree += txs->txs_ndesc;
2535 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2536 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2537 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2538 m_freem(txs->txs_mbuf);
2539 txs->txs_mbuf = NULL;
2540 }
2541
2542 /* Update the dirty transmit buffer pointer. */
2543 sc->sc_txsdirty = i;
2544 DPRINTF(WM_DEBUG_TX,
2545 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2546
2547 /*
2548 * If there are no more pending transmissions, cancel the watchdog
2549 * timer.
2550 */
2551 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2552 ifp->if_timer = 0;
2553 }
2554
2555 /*
2556 * wm_rxintr:
2557 *
2558 * Helper; handle receive interrupts.
2559 */
2560 static void
2561 wm_rxintr(struct wm_softc *sc)
2562 {
2563 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2564 struct wm_rxsoft *rxs;
2565 struct mbuf *m;
2566 int i, len;
2567 uint8_t status, errors;
2568
2569 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2570 rxs = &sc->sc_rxsoft[i];
2571
2572 DPRINTF(WM_DEBUG_RX,
2573 ("%s: RX: checking descriptor %d\n",
2574 sc->sc_dev.dv_xname, i));
2575
2576 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2577
2578 status = sc->sc_rxdescs[i].wrx_status;
2579 errors = sc->sc_rxdescs[i].wrx_errors;
2580 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2581
2582 if ((status & WRX_ST_DD) == 0) {
2583 /*
2584 * We have processed all of the receive descriptors.
2585 */
2586 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2587 break;
2588 }
2589
2590 if (__predict_false(sc->sc_rxdiscard)) {
2591 DPRINTF(WM_DEBUG_RX,
2592 ("%s: RX: discarding contents of descriptor %d\n",
2593 sc->sc_dev.dv_xname, i));
2594 WM_INIT_RXDESC(sc, i);
2595 if (status & WRX_ST_EOP) {
2596 /* Reset our state. */
2597 DPRINTF(WM_DEBUG_RX,
2598 ("%s: RX: resetting rxdiscard -> 0\n",
2599 sc->sc_dev.dv_xname));
2600 sc->sc_rxdiscard = 0;
2601 }
2602 continue;
2603 }
2604
2605 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2606 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2607
2608 m = rxs->rxs_mbuf;
2609
2610 /*
2611 * Add a new receive buffer to the ring, unless of
2612 * course the length is zero. Treat the latter as a
2613 * failed mapping.
2614 */
2615 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2616 /*
2617 * Failed, throw away what we've done so
2618 * far, and discard the rest of the packet.
2619 */
2620 ifp->if_ierrors++;
2621 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2622 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2623 WM_INIT_RXDESC(sc, i);
2624 if ((status & WRX_ST_EOP) == 0)
2625 sc->sc_rxdiscard = 1;
2626 if (sc->sc_rxhead != NULL)
2627 m_freem(sc->sc_rxhead);
2628 WM_RXCHAIN_RESET(sc);
2629 DPRINTF(WM_DEBUG_RX,
2630 ("%s: RX: Rx buffer allocation failed, "
2631 "dropping packet%s\n", sc->sc_dev.dv_xname,
2632 sc->sc_rxdiscard ? " (discard)" : ""));
2633 continue;
2634 }
2635
2636 WM_RXCHAIN_LINK(sc, m);
2637
2638 m->m_len = len;
2639
2640 DPRINTF(WM_DEBUG_RX,
2641 ("%s: RX: buffer at %p len %d\n",
2642 sc->sc_dev.dv_xname, m->m_data, len));
2643
2644 /*
2645 * If this is not the end of the packet, keep
2646 * looking.
2647 */
2648 if ((status & WRX_ST_EOP) == 0) {
2649 sc->sc_rxlen += len;
2650 DPRINTF(WM_DEBUG_RX,
2651 ("%s: RX: not yet EOP, rxlen -> %d\n",
2652 sc->sc_dev.dv_xname, sc->sc_rxlen));
2653 continue;
2654 }
2655
2656 /*
2657 * Okay, we have the entire packet now. The chip is
2658 * configured to include the FCS (not all chips can
2659 * be configured to strip it), so we need to trim it.
2660 */
2661 m->m_len -= ETHER_CRC_LEN;
2662
2663 *sc->sc_rxtailp = NULL;
2664 len = m->m_len + sc->sc_rxlen;
2665 m = sc->sc_rxhead;
2666
2667 WM_RXCHAIN_RESET(sc);
2668
2669 DPRINTF(WM_DEBUG_RX,
2670 ("%s: RX: have entire packet, len -> %d\n",
2671 sc->sc_dev.dv_xname, len));
2672
2673 /*
2674 * If an error occurred, update stats and drop the packet.
2675 */
2676 if (errors &
2677 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2678 ifp->if_ierrors++;
2679 if (errors & WRX_ER_SE)
2680 log(LOG_WARNING, "%s: symbol error\n",
2681 sc->sc_dev.dv_xname);
2682 else if (errors & WRX_ER_SEQ)
2683 log(LOG_WARNING, "%s: receive sequence error\n",
2684 sc->sc_dev.dv_xname);
2685 else if (errors & WRX_ER_CE)
2686 log(LOG_WARNING, "%s: CRC error\n",
2687 sc->sc_dev.dv_xname);
2688 m_freem(m);
2689 continue;
2690 }
2691
2692 /*
2693 * No errors. Receive the packet.
2694 */
2695 m->m_pkthdr.rcvif = ifp;
2696 m->m_pkthdr.len = len;
2697
2698 #if 0 /* XXXJRT */
2699 /*
2700 * If VLANs are enabled, VLAN packets have been unwrapped
2701 * for us. Associate the tag with the packet.
2702 */
2703 if ((status & WRX_ST_VP) != 0) {
2704 			VLAN_INPUT_TAG(ifp, m,
2705 			    le16toh(sc->sc_rxdescs[i].wrx_special),
2706 			    continue);
2707 }
2708 #endif /* XXXJRT */
2709
2710 /*
2711 * Set up checksum info for this packet.
2712 */
2713 if ((status & WRX_ST_IXSM) == 0) {
2714 if (status & WRX_ST_IPCS) {
2715 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2716 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2717 if (errors & WRX_ER_IPE)
2718 m->m_pkthdr.csum_flags |=
2719 M_CSUM_IPv4_BAD;
2720 }
2721 if (status & WRX_ST_TCPCS) {
2722 /*
2723 * Note: we don't know if this was TCP or UDP,
2724 * so we just set both bits, and expect the
2725 * upper layers to deal.
2726 */
2727 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2728 m->m_pkthdr.csum_flags |=
2729 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2730 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2731 if (errors & WRX_ER_TCPE)
2732 m->m_pkthdr.csum_flags |=
2733 M_CSUM_TCP_UDP_BAD;
2734 }
2735 }
2736
2737 ifp->if_ipackets++;
2738
2739 #if NBPFILTER > 0
2740 /* Pass this up to any BPF listeners. */
2741 if (ifp->if_bpf)
2742 bpf_mtap(ifp->if_bpf, m);
2743 #endif /* NBPFILTER > 0 */
2744
2745 /* Pass it on. */
2746 (*ifp->if_input)(ifp, m);
2747 }
2748
2749 /* Update the receive pointer. */
2750 sc->sc_rxptr = i;
2751
2752 DPRINTF(WM_DEBUG_RX,
2753 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2754 }
2755
2756 /*
2757 * wm_linkintr:
2758 *
2759 * Helper; handle link interrupts.
2760 */
2761 static void
2762 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2763 {
2764 uint32_t status;
2765
2766 /*
2767 * If we get a link status interrupt on a 1000BASE-T
2768 * device, just fall into the normal MII tick path.
2769 */
2770 if (sc->sc_flags & WM_F_HAS_MII) {
2771 if (icr & ICR_LSC) {
2772 DPRINTF(WM_DEBUG_LINK,
2773 ("%s: LINK: LSC -> mii_tick\n",
2774 sc->sc_dev.dv_xname));
2775 mii_tick(&sc->sc_mii);
2776 } else if (icr & ICR_RXSEQ) {
2777 DPRINTF(WM_DEBUG_LINK,
2778 			    ("%s: LINK: Receive sequence error\n",
2779 sc->sc_dev.dv_xname));
2780 }
2781 return;
2782 }
2783
2784 /*
2785 * If we are now receiving /C/, check for link again in
2786 * a couple of link clock ticks.
2787 */
2788 if (icr & ICR_RXCFG) {
2789 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2790 sc->sc_dev.dv_xname));
2791 sc->sc_tbi_anstate = 2;
2792 }
2793
2794 if (icr & ICR_LSC) {
2795 status = CSR_READ(sc, WMREG_STATUS);
2796 if (status & STATUS_LU) {
2797 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2798 sc->sc_dev.dv_xname,
2799 (status & STATUS_FD) ? "FDX" : "HDX"));
2800 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2801 sc->sc_fcrtl &= ~FCRTL_XONE;
2802 if (status & STATUS_FD)
2803 sc->sc_tctl |=
2804 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2805 else
2806 sc->sc_tctl |=
2807 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2808 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2809 sc->sc_fcrtl |= FCRTL_XONE;
2810 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2811 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2812 WMREG_OLD_FCRTL : WMREG_FCRTL,
2813 sc->sc_fcrtl);
2814 sc->sc_tbi_linkup = 1;
2815 } else {
2816 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2817 sc->sc_dev.dv_xname));
2818 sc->sc_tbi_linkup = 0;
2819 }
2820 sc->sc_tbi_anstate = 2;
2821 wm_tbi_set_linkled(sc);
2822 } else if (icr & ICR_RXSEQ) {
2823 DPRINTF(WM_DEBUG_LINK,
2824 ("%s: LINK: Receive sequence error\n",
2825 sc->sc_dev.dv_xname));
2826 }
2827 }
2828
2829 /*
2830 * wm_tick:
2831 *
2832 * One second timer, used to check link status, sweep up
2833 * completed transmit jobs, etc.
2834 */
2835 static void
2836 wm_tick(void *arg)
2837 {
2838 struct wm_softc *sc = arg;
2839 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2840 int s;
2841
2842 s = splnet();
2843
2844 if (sc->sc_type >= WM_T_82542_2_1) {
2845 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2846 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2847 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2848 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2849 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2850 }
2851
2852 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2853 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2854
2856 if (sc->sc_flags & WM_F_HAS_MII)
2857 mii_tick(&sc->sc_mii);
2858 else
2859 wm_tbi_check_link(sc);
2860
2861 splx(s);
2862
2863 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2864 }
2865
2866 /*
2867 * wm_reset:
2868 *
2869 * Reset the i82542 chip.
2870 */
2871 static void
2872 wm_reset(struct wm_softc *sc)
2873 {
2874 int i;
2875
2876 /*
2877 * Allocate on-chip memory according to the MTU size.
2878 * The Packet Buffer Allocation register must be written
2879 * before the chip is reset.
2880 */
2881 switch (sc->sc_type) {
2882 case WM_T_82547:
2883 case WM_T_82547_2:
2884 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2885 PBA_22K : PBA_30K;
2886 sc->sc_txfifo_head = 0;
2887 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2888 sc->sc_txfifo_size =
2889 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2890 sc->sc_txfifo_stall = 0;
2891 break;
2892 case WM_T_82571:
2893 case WM_T_82572:
2894 case WM_T_80003:
2895 sc->sc_pba = PBA_32K;
2896 break;
2897 case WM_T_82573:
2898 sc->sc_pba = PBA_12K;
2899 break;
2900 case WM_T_ICH8:
2901 sc->sc_pba = PBA_8K;
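		/* ICH8 also has a total packet-buffer-size (PBS) register. */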
2902 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2903 break;
2904 case WM_T_ICH9:
2905 sc->sc_pba = PBA_10K;
2906 break;
2907 default:
2908 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2909 PBA_40K : PBA_48K;
2910 break;
2911 }
2912 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2913
2914 if (sc->sc_flags & WM_F_PCIE) {
2915 int timeout = 800;
2916
2917 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2918 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2919
2920 		while (timeout--) {
2921 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2922 break;
2923 delay(100);
2924 }
2925 }
2926
2927 /* clear interrupt */
2928 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2929
2930 /*
2931 * 82541 Errata 29? & 82547 Errata 28?
2932 * See also the description about PHY_RST bit in CTRL register
2933 * in 8254x_GBe_SDM.pdf.
2934 */
2935 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2936 CSR_WRITE(sc, WMREG_CTRL,
2937 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2938 delay(5000);
2939 }
2940
2941 switch (sc->sc_type) {
2942 case WM_T_82544:
2943 case WM_T_82540:
2944 case WM_T_82545:
2945 case WM_T_82546:
2946 case WM_T_82541:
2947 case WM_T_82541_2:
2948 /*
2949 * On some chipsets, a reset through a memory-mapped write
2950 * cycle can cause the chip to reset before completing the
2951 * write cycle. This causes major headache that can be
2952 * avoided by issuing the reset via indirect register writes
2953 * through I/O space.
2954 *
2955 * So, if we successfully mapped the I/O BAR at attach time,
2956 * use that. Otherwise, try our luck with a memory-mapped
2957 * reset.
2958 */
2959 if (sc->sc_flags & WM_F_IOH_VALID)
2960 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2961 else
2962 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2963 break;
2964
2965 case WM_T_82545_3:
2966 case WM_T_82546_3:
2967 /* Use the shadow control register on these chips. */
2968 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2969 break;
2970
2971 case WM_T_ICH8:
2972 case WM_T_ICH9:
2973 wm_get_swfwhw_semaphore(sc);
2974 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2975 		delay(10000);
		break;

2977 default:
2978 /* Everything else can safely use the documented method. */
2979 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2980 break;
2981 }
2982 delay(10000);
2983
2984 	switch (sc->sc_type) {
2985 case WM_T_82542_2_0:
2986 case WM_T_82542_2_1:
2987 case WM_T_82543:
2988 case WM_T_82544:
2989 delay(10);
2990 delay(2000);
2991 break;
2992 case WM_T_82541:
2993 case WM_T_82541_2:
2994 case WM_T_82547:
2995 case WM_T_82547_2:
2996 delay(20000);
2997 break;
2998 case WM_T_82573:
2999 delay(10);
3000 /* FALLTHROUGH */
3001 default:
3002 /* wait for eeprom to reload */
3003 for (i = 10; i > 0; i--) {
3004 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3005 break;
3006 delay(1000);
3007 }
3008 if (i == 0) {
3009 log(LOG_ERR, "%s: auto read from eeprom failed to "
3010 "complete\n", sc->sc_dev.dv_xname);
3011 }
3012 }
3013
3014 #if 0
3015 for (i = 0; i < 1000; i++) {
3016 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3017 return;
3018 }
3019 delay(20);
3020 }
3021
3022 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3023 log(LOG_ERR, "%s: reset failed to complete\n",
3024 sc->sc_dev.dv_xname);
3025 #endif
3026 }
3027
3028 /*
3029 * wm_init: [ifnet interface function]
3030 *
3031 * Initialize the interface. Must be called at splnet().
3032 */
3033 static int
3034 wm_init(struct ifnet *ifp)
3035 {
3036 struct wm_softc *sc = ifp->if_softc;
3037 struct wm_rxsoft *rxs;
3038 int i, error = 0;
3039 uint32_t reg;
3040
3041 /*
3042 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3043 	 * There is a small but measurable benefit to avoiding the adjustment
3044 * of the descriptor so that the headers are aligned, for normal mtu,
3045 * on such platforms. One possibility is that the DMA itself is
3046 * slightly more efficient if the front of the entire packet (instead
3047 * of the front of the headers) is aligned.
3048 *
3049 * Note we must always set align_tweak to 0 if we are using
3050 * jumbo frames.
3051 */
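	/*
	 * (A 2-byte tweak makes the 14-byte Ethernet header end on a
	 * 4-byte boundary, so the IP header that follows is aligned.)
	 */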
3052 #ifdef __NO_STRICT_ALIGNMENT
3053 sc->sc_align_tweak = 0;
3054 #else
3055 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3056 sc->sc_align_tweak = 0;
3057 else
3058 sc->sc_align_tweak = 2;
3059 #endif /* __NO_STRICT_ALIGNMENT */
3060
3061 /* Cancel any pending I/O. */
3062 wm_stop(ifp, 0);
3063
3064 /* update statistics before reset */
3065 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3066 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3067
3068 /* Reset the chip to a known state. */
3069 wm_reset(sc);
3070
3071 /* Initialize the transmit descriptor ring. */
3072 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3073 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3074 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3075 sc->sc_txfree = WM_NTXDESC(sc);
3076 sc->sc_txnext = 0;
3077
3078 if (sc->sc_type < WM_T_82543) {
3079 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3080 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3081 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3082 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3083 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3084 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3085 } else {
3086 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3087 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3088 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3089 CSR_WRITE(sc, WMREG_TDH, 0);
3090 CSR_WRITE(sc, WMREG_TDT, 0);
3091 CSR_WRITE(sc, WMREG_TIDV, 64);
3092 CSR_WRITE(sc, WMREG_TADV, 128);
3093
3094 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3095 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3096 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3097 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3098 }
3099 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3100 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3101
3102 /* Initialize the transmit job descriptors. */
3103 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3104 sc->sc_txsoft[i].txs_mbuf = NULL;
3105 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3106 sc->sc_txsnext = 0;
3107 sc->sc_txsdirty = 0;
3108
3109 /*
3110 * Initialize the receive descriptor and receive job
3111 * descriptor rings.
3112 */
3113 if (sc->sc_type < WM_T_82543) {
3114 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3115 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3116 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3117 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3118 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3119 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3120
3121 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3122 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3123 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3124 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3125 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3126 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3127 } else {
3128 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3129 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3130 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3131 CSR_WRITE(sc, WMREG_RDH, 0);
3132 CSR_WRITE(sc, WMREG_RDT, 0);
3133 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD);
3134 CSR_WRITE(sc, WMREG_RADV, 128);
3135 }
3136 for (i = 0; i < WM_NRXDESC; i++) {
3137 rxs = &sc->sc_rxsoft[i];
3138 if (rxs->rxs_mbuf == NULL) {
3139 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3140 log(LOG_ERR, "%s: unable to allocate or map rx "
3141 "buffer %d, error = %d\n",
3142 sc->sc_dev.dv_xname, i, error);
3143 /*
3144 * XXX Should attempt to run with fewer receive
3145 * XXX buffers instead of just failing.
3146 */
3147 wm_rxdrain(sc);
3148 goto out;
3149 }
3150 } else
3151 WM_INIT_RXDESC(sc, i);
3152 }
3153 sc->sc_rxptr = 0;
3154 sc->sc_rxdiscard = 0;
3155 WM_RXCHAIN_RESET(sc);
3156
3157 /*
3158 * Clear out the VLAN table -- we don't use it (yet).
3159 */
3160 CSR_WRITE(sc, WMREG_VET, 0);
3161 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3162 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3163
3164 /*
3165 * Set up flow-control parameters.
3166 *
3167 * XXX Values could probably stand some tuning.
3168 */
3169 if (sc->sc_type != WM_T_ICH8) {
3170 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3171 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3172 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3173 }
3174
3175 sc->sc_fcrtl = FCRTL_DFLT;
3176 if (sc->sc_type < WM_T_82543) {
3177 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3178 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3179 } else {
3180 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3181 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3182 }
3183 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3184
3185 #if 0 /* XXXJRT */
3186 /* Deal with VLAN enables. */
3187 if (VLAN_ATTACHED(&sc->sc_ethercom))
3188 sc->sc_ctrl |= CTRL_VME;
3189 else
3190 #endif /* XXXJRT */
3191 sc->sc_ctrl &= ~CTRL_VME;
3192
3193 /* Write the control registers. */
3194 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3195 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3196 int val;
3197 val = CSR_READ(sc, WMREG_CTRL_EXT);
3198 val &= ~CTRL_EXT_LINK_MODE_MASK;
3199 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3200
3201 		/* Bypass RX and TX FIFOs */
3202 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3203 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3204 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3205
3206 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3207 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3208 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3209 /*
3210 * Set the mac to wait the maximum time between each
3211 * iteration and increase the max iterations when
3212 * polling the phy; this fixes erroneous timeouts at 10Mbps.
3213 */
3214 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3215 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3216 val |= 0x3F;
3217 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3218 }
3219 #if 0
3220 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3221 #endif
3222
3223 /*
3224 * Set up checksum offload parameters.
3225 */
3226 reg = CSR_READ(sc, WMREG_RXCSUM);
3227 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3228 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3229 reg |= RXCSUM_IPOFL;
3230 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3231 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3232 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3233 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3234 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3235
3236 /*
3237 * Set up the interrupt registers.
3238 */
3239 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3240 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3241 ICR_RXO | ICR_RXT0;
3242 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3243 sc->sc_icr |= ICR_RXCFG;
3244 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3245
3246 /* Set up the inter-packet gap. */
3247 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3248
3249 if (sc->sc_type >= WM_T_82543) {
3250 /* Set up the interrupt throttling register (units of 256ns) */
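		/*
		 * That is, aim for roughly 7000 interrupts/sec:
		 * 10^9 ns / (7000 * 256 ns) ~= 558 ITR units.
		 */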
3251 sc->sc_itr = 1000000000 / (7000 * 256);
3252 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3253 }
3254
3255 #if 0 /* XXXJRT */
3256 /* Set the VLAN ethernetype. */
3257 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3258 #endif
3259
3260 /*
3261 * Set up the transmit control register; we start out with
3262 	 * a collision distance suitable for FDX, but update it when
3263 * we resolve the media type.
3264 */
3265 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3266 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3267 if (sc->sc_type >= WM_T_82571)
3268 sc->sc_tctl |= TCTL_MULR;
3269 if (sc->sc_type >= WM_T_80003)
3270 sc->sc_tctl |= TCTL_RTLC;
3271 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3272
3273 /* Set the media. */
3274 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
3275
3276 /*
3277 * Set up the receive control register; we actually program
3278 * the register when we set the receive filter. Use multicast
3279 * address offset type 0.
3280 *
3281 * Only the i82544 has the ability to strip the incoming
3282 * CRC, so we don't enable that feature.
3283 */
3284 sc->sc_mchash_type = 0;
3285 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3286 | RCTL_MO(sc->sc_mchash_type);
3287
3288 	/* The 82573 and ICH8 don't support jumbo frames */
3289 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3290 sc->sc_rctl |= RCTL_LPE;
3291
3292 if (MCLBYTES == 2048) {
3293 sc->sc_rctl |= RCTL_2k;
3294 } else {
3295 if (sc->sc_type >= WM_T_82543) {
3296 			switch (MCLBYTES) {
3297 case 4096:
3298 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3299 break;
3300 case 8192:
3301 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3302 break;
3303 case 16384:
3304 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3305 break;
3306 default:
3307 panic("wm_init: MCLBYTES %d unsupported",
3308 MCLBYTES);
3309 break;
3310 }
3311 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3312 }
3313
3314 /* Set the receive filter. */
3315 wm_set_filter(sc);
3316
3317 /* Start the one second link check clock. */
3318 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3319
3320 /* ...all done! */
3321 ifp->if_flags |= IFF_RUNNING;
3322 ifp->if_flags &= ~IFF_OACTIVE;
3323
3324 out:
3325 if (error)
3326 log(LOG_ERR, "%s: interface not running\n",
3327 sc->sc_dev.dv_xname);
3328 return (error);
3329 }
3330
3331 /*
3332 * wm_rxdrain:
3333 *
3334 * Drain the receive queue.
3335 */
3336 static void
3337 wm_rxdrain(struct wm_softc *sc)
3338 {
3339 struct wm_rxsoft *rxs;
3340 int i;
3341
3342 for (i = 0; i < WM_NRXDESC; i++) {
3343 rxs = &sc->sc_rxsoft[i];
3344 if (rxs->rxs_mbuf != NULL) {
3345 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3346 m_freem(rxs->rxs_mbuf);
3347 rxs->rxs_mbuf = NULL;
3348 }
3349 }
3350 }
3351
3352 /*
3353 * wm_stop: [ifnet interface function]
3354 *
3355 * Stop transmission on the interface.
3356 */
3357 static void
3358 wm_stop(struct ifnet *ifp, int disable)
3359 {
3360 struct wm_softc *sc = ifp->if_softc;
3361 struct wm_txsoft *txs;
3362 int i;
3363
3364 /* Stop the one second clock. */
3365 callout_stop(&sc->sc_tick_ch);
3366
3367 /* Stop the 82547 Tx FIFO stall check timer. */
3368 if (sc->sc_type == WM_T_82547)
3369 callout_stop(&sc->sc_txfifo_ch);
3370
3371 if (sc->sc_flags & WM_F_HAS_MII) {
3372 /* Down the MII. */
3373 mii_down(&sc->sc_mii);
3374 }
3375
3376 /* Stop the transmit and receive processes. */
3377 CSR_WRITE(sc, WMREG_TCTL, 0);
3378 CSR_WRITE(sc, WMREG_RCTL, 0);
3379
3380 /*
3381 * Clear the interrupt mask to ensure the device cannot assert its
3382 * interrupt line.
3383 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3384 * any currently pending or shared interrupt.
3385 */
3386 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3387 sc->sc_icr = 0;
3388
3389 /* Release any queued transmit buffers. */
3390 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3391 txs = &sc->sc_txsoft[i];
3392 if (txs->txs_mbuf != NULL) {
3393 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3394 m_freem(txs->txs_mbuf);
3395 txs->txs_mbuf = NULL;
3396 }
3397 }
3398
3399 if (disable)
3400 wm_rxdrain(sc);
3401
3402 /* Mark the interface as down and cancel the watchdog timer. */
3403 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3404 ifp->if_timer = 0;
3405 }
3406
3407 /*
3408 * wm_acquire_eeprom:
3409 *
3410 * Perform the EEPROM handshake required on some chips.
3411 */
3412 static int
3413 wm_acquire_eeprom(struct wm_softc *sc)
3414 {
3415 uint32_t reg;
3416 int x;
3417 int ret = 0;
3418
3419 	/* Flash-type EEPROMs need no handshake; always succeeds. */
3420 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3421 return 0;
3422
3423 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3424 ret = wm_get_swfwhw_semaphore(sc);
3425 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3426 /* this will also do wm_get_swsm_semaphore() if needed */
3427 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3428 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3429 ret = wm_get_swsm_semaphore(sc);
3430 }
3431
3432 if (ret)
3433 return 1;
3434
3435 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3436 reg = CSR_READ(sc, WMREG_EECD);
3437
3438 /* Request EEPROM access. */
3439 reg |= EECD_EE_REQ;
3440 CSR_WRITE(sc, WMREG_EECD, reg);
3441
3442 		/* ... and wait for it to be granted. */
3443 for (x = 0; x < 1000; x++) {
3444 reg = CSR_READ(sc, WMREG_EECD);
3445 if (reg & EECD_EE_GNT)
3446 break;
3447 delay(5);
3448 }
3449 if ((reg & EECD_EE_GNT) == 0) {
3450 aprint_error("%s: could not acquire EEPROM GNT\n",
3451 sc->sc_dev.dv_xname);
3452 reg &= ~EECD_EE_REQ;
3453 CSR_WRITE(sc, WMREG_EECD, reg);
3454 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3455 wm_put_swfwhw_semaphore(sc);
3456 if (sc->sc_flags & WM_F_SWFW_SYNC)
3457 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3458 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3459 wm_put_swsm_semaphore(sc);
3460 return (1);
3461 }
3462 }
3463
3464 return (0);
3465 }
3466
3467 /*
3468 * wm_release_eeprom:
3469 *
3470 * Release the EEPROM mutex.
3471 */
3472 static void
3473 wm_release_eeprom(struct wm_softc *sc)
3474 {
3475 uint32_t reg;
3476
3477 	/* Flash-type EEPROMs need no handshake; nothing to release. */
3478 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3479 return;
3480
3481 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3482 reg = CSR_READ(sc, WMREG_EECD);
3483 reg &= ~EECD_EE_REQ;
3484 CSR_WRITE(sc, WMREG_EECD, reg);
3485 }
3486
3487 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3488 wm_put_swfwhw_semaphore(sc);
3489 if (sc->sc_flags & WM_F_SWFW_SYNC)
3490 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3491 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3492 wm_put_swsm_semaphore(sc);
3493 }
3494
3495 /*
3496 * wm_eeprom_sendbits:
3497 *
3498 * Send a series of bits to the EEPROM.
3499 */
3500 static void
3501 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3502 {
3503 uint32_t reg;
3504 int x;
3505
3506 reg = CSR_READ(sc, WMREG_EECD);
3507
3508 for (x = nbits; x > 0; x--) {
3509 if (bits & (1U << (x - 1)))
3510 reg |= EECD_DI;
3511 else
3512 reg &= ~EECD_DI;
3513 CSR_WRITE(sc, WMREG_EECD, reg);
3514 delay(2);
3515 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3516 delay(2);
3517 CSR_WRITE(sc, WMREG_EECD, reg);
3518 delay(2);
3519 }
3520 }
3521
3522 /*
3523 * wm_eeprom_recvbits:
3524 *
3525 * Receive a series of bits from the EEPROM.
3526 */
3527 static void
3528 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3529 {
3530 uint32_t reg, val;
3531 int x;
3532
3533 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3534
3535 val = 0;
3536 for (x = nbits; x > 0; x--) {
3537 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3538 delay(2);
3539 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3540 val |= (1U << (x - 1));
3541 CSR_WRITE(sc, WMREG_EECD, reg);
3542 delay(2);
3543 }
3544 *valp = val;
3545 }
3546
3547 /*
3548 * wm_read_eeprom_uwire:
3549 *
3550 * Read a word from the EEPROM using the MicroWire protocol.
3551 */
3552 static int
3553 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3554 {
3555 uint32_t reg, val;
3556 int i;
3557
3558 for (i = 0; i < wordcnt; i++) {
3559 /* Clear SK and DI. */
3560 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3561 CSR_WRITE(sc, WMREG_EECD, reg);
3562
3563 /* Set CHIP SELECT. */
3564 reg |= EECD_CS;
3565 CSR_WRITE(sc, WMREG_EECD, reg);
3566 delay(2);
3567
3568 /* Shift in the READ command. */
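		/*
		 * The start bit and READ opcode are encoded together in
		 * UWIRE_OPC_READ and shifted out MSB first, followed by
		 * the word address; the part then clocks out 16 data bits.
		 */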
3569 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3570
3571 /* Shift in address. */
3572 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3573
3574 /* Shift out the data. */
3575 wm_eeprom_recvbits(sc, &val, 16);
3576 data[i] = val & 0xffff;
3577
3578 /* Clear CHIP SELECT. */
3579 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3580 CSR_WRITE(sc, WMREG_EECD, reg);
3581 delay(2);
3582 }
3583
3584 return (0);
3585 }
3586
3587 /*
3588 * wm_spi_eeprom_ready:
3589 *
3590 * Wait for a SPI EEPROM to be ready for commands.
3591 */
3592 static int
3593 wm_spi_eeprom_ready(struct wm_softc *sc)
3594 {
3595 uint32_t val;
3596 int usec;
3597
3598 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3599 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3600 wm_eeprom_recvbits(sc, &val, 8);
3601 if ((val & SPI_SR_RDY) == 0)
3602 break;
3603 }
3604 if (usec >= SPI_MAX_RETRIES) {
3605 aprint_error("%s: EEPROM failed to become ready\n",
3606 sc->sc_dev.dv_xname);
3607 return (1);
3608 }
3609 return (0);
3610 }
3611
3612 /*
3613 * wm_read_eeprom_spi:
3614 *
3615  *	Read a word from the EEPROM using the SPI protocol.
3616 */
3617 static int
3618 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3619 {
3620 uint32_t reg, val;
3621 int i;
3622 uint8_t opc;
3623
3624 /* Clear SK and CS. */
3625 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3626 CSR_WRITE(sc, WMREG_EECD, reg);
3627 delay(2);
3628
3629 if (wm_spi_eeprom_ready(sc))
3630 return (1);
3631
3632 /* Toggle CS to flush commands. */
3633 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3634 delay(2);
3635 CSR_WRITE(sc, WMREG_EECD, reg);
3636 delay(2);
3637
3638 opc = SPI_OPC_READ;
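	/*
	 * Small 8-bit-address parts carry the ninth byte-address bit
	 * in the opcode (word 128 is byte address 256).
	 */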
3639 if (sc->sc_ee_addrbits == 8 && word >= 128)
3640 opc |= SPI_OPC_A8;
3641
3642 wm_eeprom_sendbits(sc, opc, 8);
3643 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3644
3645 for (i = 0; i < wordcnt; i++) {
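		/*
		 * The SPI part shifts each word out high byte first,
		 * while the EEPROM image is little-endian 16-bit words,
		 * hence the byte swap.
		 */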
3646 wm_eeprom_recvbits(sc, &val, 16);
3647 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3648 }
3649
3650 /* Raise CS and clear SK. */
3651 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3652 CSR_WRITE(sc, WMREG_EECD, reg);
3653 delay(2);
3654
3655 return (0);
3656 }
3657
3658 #define EEPROM_CHECKSUM 0xBABA
3659 #define EEPROM_SIZE 0x0040
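/*
 * The vendor programs the final word so that the first EEPROM_SIZE
 * words sum, modulo 2^16, to EEPROM_CHECKSUM.
 */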
3660
3661 /*
3662 * wm_validate_eeprom_checksum
3663 *
3664 * The checksum is defined as the sum of the first 64 (16 bit) words.
3665 */
3666 static int
3667 wm_validate_eeprom_checksum(struct wm_softc *sc)
3668 {
3669 uint16_t checksum;
3670 uint16_t eeprom_data;
3671 int i;
3672
3673 checksum = 0;
3674
3675 for (i = 0; i < EEPROM_SIZE; i++) {
3676 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3677 return 1;
3678 checksum += eeprom_data;
3679 }
3680
3681 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3682 return 1;
3683
3684 return 0;
3685 }
3686
3687 /*
3688 * wm_read_eeprom:
3689 *
3690 * Read data from the serial EEPROM.
3691 */
3692 static int
3693 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3694 {
3695 int rv;
3696
3697 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3698 return 1;
3699
3700 if (wm_acquire_eeprom(sc))
3701 return 1;
3702
3703 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3704 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3705 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3706 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3707 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3708 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3709 else
3710 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3711
3712 wm_release_eeprom(sc);
3713 return rv;
3714 }
3715
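/*
 * wm_read_eeprom_eerd:
 *
 *	Read words from the EEPROM using the EERD register interface:
 *	write the word address along with the start bit, poll for the
 *	done bit, then take the data out of the same register.
 */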
3716 static int
3717 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3718 uint16_t *data)
3719 {
3720 int i, eerd = 0;
3721 int error = 0;
3722
3723 for (i = 0; i < wordcnt; i++) {
3724 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3725
3726 CSR_WRITE(sc, WMREG_EERD, eerd);
3727 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3728 if (error != 0)
3729 break;
3730
3731 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3732 }
3733
3734 return error;
3735 }
3736
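/*
 * wm_poll_eerd_eewr_done:
 *
 *	Poll the EERD (read) or EEWR (write) register for up to
 *	~500ms (100000 polls, 5us apart), waiting for the done bit
 *	to assert; returns 0 on success and -1 on timeout.
 */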
3737 static int
3738 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3739 {
3740 uint32_t attempts = 100000;
3741 uint32_t i, reg = 0;
3742 int32_t done = -1;
3743
3744 for (i = 0; i < attempts; i++) {
3745 reg = CSR_READ(sc, rw);
3746
3747 if (reg & EERD_DONE) {
3748 done = 0;
3749 break;
3750 }
3751 delay(5);
3752 }
3753
3754 return done;
3755 }
3756
3757 /*
3758 * wm_add_rxbuf:
3759 *
 *	Add a receive buffer to the indicated descriptor.
3761 */
3762 static int
3763 wm_add_rxbuf(struct wm_softc *sc, int idx)
3764 {
3765 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3766 struct mbuf *m;
3767 int error;
3768
3769 MGETHDR(m, M_DONTWAIT, MT_DATA);
3770 if (m == NULL)
3771 return (ENOBUFS);
3772
3773 MCLGET(m, M_DONTWAIT);
3774 if ((m->m_flags & M_EXT) == 0) {
3775 m_freem(m);
3776 return (ENOBUFS);
3777 }
3778
3779 if (rxs->rxs_mbuf != NULL)
3780 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3781
3782 rxs->rxs_mbuf = m;
3783
3784 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3785 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3786 BUS_DMA_READ|BUS_DMA_NOWAIT);
3787 if (error) {
3788 /* XXX XXX XXX */
3789 printf("%s: unable to load rx DMA map %d, error = %d\n",
3790 sc->sc_dev.dv_xname, idx, error);
3791 panic("wm_add_rxbuf");
3792 }
3793
3794 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3795 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3796
3797 WM_INIT_RXDESC(sc, idx);
3798
3799 return (0);
3800 }
3801
3802 /*
3803 * wm_set_ral:
3804 *
 *	Set an entry in the receive address list.
3806 */
3807 static void
3808 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3809 {
3810 uint32_t ral_lo, ral_hi;
3811
3812 if (enaddr != NULL) {
3813 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3814 (enaddr[3] << 24);
3815 ral_hi = enaddr[4] | (enaddr[5] << 8);
3816 ral_hi |= RAL_AV;
3817 } else {
3818 ral_lo = 0;
3819 ral_hi = 0;
3820 }
3821
3822 if (sc->sc_type >= WM_T_82544) {
3823 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3824 ral_lo);
3825 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3826 ral_hi);
3827 } else {
3828 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3829 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3830 }
3831 }
3832
3833 /*
3834 * wm_mchash:
3835 *
3836 * Compute the hash of the multicast address for the 4096-bit
3837 * multicast filter.
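 *	The hash is taken from the last two octets of the address;
 *	sc_mchash_type selects which 12-bit window of those 16 bits is
 *	used (a 10-bit window on ICH8, whose filter table is only
 *	1024 bits wide).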
3838 */
3839 static uint32_t
3840 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3841 {
3842 static const int lo_shift[4] = { 4, 3, 2, 0 };
3843 static const int hi_shift[4] = { 4, 5, 6, 8 };
3844 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3845 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3846 uint32_t hash;
3847
3848 if (sc->sc_type == WM_T_ICH8) {
3849 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3850 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3851 return (hash & 0x3ff);
3852 }
3853 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3854 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3855
3856 return (hash & 0xfff);
3857 }
3858
3859 /*
3860 * wm_set_filter:
3861 *
3862 * Set up the receive filter.
3863 */
3864 static void
3865 wm_set_filter(struct wm_softc *sc)
3866 {
3867 struct ethercom *ec = &sc->sc_ethercom;
3868 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3869 struct ether_multi *enm;
3870 struct ether_multistep step;
3871 bus_addr_t mta_reg;
3872 uint32_t hash, reg, bit;
3873 int i, size;
3874
3875 if (sc->sc_type >= WM_T_82544)
3876 mta_reg = WMREG_CORDOVA_MTA;
3877 else
3878 mta_reg = WMREG_MTA;
3879
3880 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3881
3882 if (ifp->if_flags & IFF_BROADCAST)
3883 sc->sc_rctl |= RCTL_BAM;
3884 if (ifp->if_flags & IFF_PROMISC) {
3885 sc->sc_rctl |= RCTL_UPE;
3886 goto allmulti;
3887 }
3888
3889 /*
3890 * Set the station address in the first RAL slot, and
3891 * clear the remaining slots.
3892 */
3893 if (sc->sc_type == WM_T_ICH8)
3894 size = WM_ICH8_RAL_TABSIZE;
3895 else
3896 size = WM_RAL_TABSIZE;
3897 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3898 for (i = 1; i < size; i++)
3899 wm_set_ral(sc, NULL, i);
3900
3901 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3902 size = WM_ICH8_MC_TABSIZE;
3903 else
3904 size = WM_MC_TABSIZE;
3905 /* Clear out the multicast table. */
3906 for (i = 0; i < size; i++)
3907 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3908
3909 ETHER_FIRST_MULTI(step, ec, enm);
3910 while (enm != NULL) {
3911 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3912 /*
3913 * We must listen to a range of multicast addresses.
3914 * For now, just accept all multicasts, rather than
3915 * trying to set only those filter bits needed to match
3916 * the range. (At this time, the only use of address
3917 * ranges is for IP multicast routing, for which the
3918 * range is big enough to require all bits set.)
3919 */
3920 goto allmulti;
3921 }
3922
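		/*
		 * The multicast table is an array of 32-bit registers:
		 * the upper bits of the hash select the register, and
		 * the low five bits select the bit within it.
		 */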
3923 hash = wm_mchash(sc, enm->enm_addrlo);
3924
3925 reg = (hash >> 5);
3926 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3927 reg &= 0x1f;
3928 else
3929 reg &= 0x7f;
3930 bit = hash & 0x1f;
3931
3932 hash = CSR_READ(sc, mta_reg + (reg << 2));
3933 hash |= 1U << bit;
3934
		/*
		 * XXX Hardware bug?  On the 82544, a write to an
		 * odd-numbered MTA register apparently must be followed
		 * by a rewrite of the preceding (even) register, or the
		 * write may be dropped, so save and rewrite it.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3942
3943 ETHER_NEXT_MULTI(step, enm);
3944 }
3945
3946 ifp->if_flags &= ~IFF_ALLMULTI;
3947 goto setit;
3948
3949 allmulti:
3950 ifp->if_flags |= IFF_ALLMULTI;
3951 sc->sc_rctl |= RCTL_MPE;
3952
3953 setit:
3954 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3955 }
3956
3957 /*
3958 * wm_tbi_mediainit:
3959 *
3960 * Initialize media for use on 1000BASE-X devices.
3961 */
3962 static void
3963 wm_tbi_mediainit(struct wm_softc *sc)
3964 {
3965 const char *sep = "";
3966
3967 if (sc->sc_type < WM_T_82543)
3968 sc->sc_tipg = TIPG_WM_DFLT;
3969 else
3970 sc->sc_tipg = TIPG_LG_DFLT;
3971
3972 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3973 wm_tbi_mediastatus);
3974
3975 /*
3976 * SWD Pins:
3977 *
3978 * 0 = Link LED (output)
3979 * 1 = Loss Of Signal (input)
3980 */
3981 sc->sc_ctrl |= CTRL_SWDPIO(0);
3982 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3983
3984 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3985
3986 #define ADD(ss, mm, dd) \
3987 do { \
3988 aprint_normal("%s%s", sep, ss); \
3989 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3990 sep = ", "; \
3991 } while (/*CONSTCOND*/0)
3992
3993 aprint_normal("%s: ", sc->sc_dev.dv_xname);
3994 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3995 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3996 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3997 aprint_normal("\n");
3998
3999 #undef ADD
4000
4001 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4002 }
4003
4004 /*
4005 * wm_tbi_mediastatus: [ifmedia interface function]
4006 *
4007 * Get the current interface media status on a 1000BASE-X device.
4008 */
4009 static void
4010 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4011 {
4012 struct wm_softc *sc = ifp->if_softc;
4013 uint32_t ctrl;
4014
4015 ifmr->ifm_status = IFM_AVALID;
4016 ifmr->ifm_active = IFM_ETHER;
4017
4018 if (sc->sc_tbi_linkup == 0) {
4019 ifmr->ifm_active |= IFM_NONE;
4020 return;
4021 }
4022
4023 ifmr->ifm_status |= IFM_ACTIVE;
4024 ifmr->ifm_active |= IFM_1000_SX;
4025 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4026 ifmr->ifm_active |= IFM_FDX;
4027 ctrl = CSR_READ(sc, WMREG_CTRL);
4028 if (ctrl & CTRL_RFCE)
4029 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4030 if (ctrl & CTRL_TFCE)
4031 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4032 }
4033
4034 /*
4035 * wm_tbi_mediachange: [ifmedia interface function]
4036 *
4037 * Set hardware to newly-selected media on a 1000BASE-X device.
4038 */
4039 static int
4040 wm_tbi_mediachange(struct ifnet *ifp)
4041 {
4042 struct wm_softc *sc = ifp->if_softc;
4043 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4044 uint32_t status;
4045 int i;
4046
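	/*
	 * TXCW holds the transmit configuration word, i.e. the
	 * 1000BASE-X autonegotiation code word we advertise: the
	 * duplex abilities, the pause bits, and TXCW_ANE to enable
	 * autonegotiation itself.
	 */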
4047 sc->sc_txcw = ife->ifm_data;
4048 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4049 sc->sc_dev.dv_xname,sc->sc_txcw));
4050 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4051 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4052 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4053 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4054 sc->sc_txcw |= TXCW_ANE;
4055 } else {
		/*
		 * If autonegotiation is turned off, force link up and
		 * turn on full duplex.
		 */
4057 sc->sc_txcw &= ~TXCW_ANE;
4058 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4059 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4060 delay(1000);
4061 }
4062
4063 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4064 sc->sc_dev.dv_xname,sc->sc_txcw));
4065 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4066 delay(10000);
4067
4068 /* NOTE: CTRL will update TFCE and RFCE automatically. */
4069
4070 sc->sc_tbi_anstate = 0;
4071
4072 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4073 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));
4074
4075 /*
4076 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4077 * optics detect a signal, 0 if they don't.
4078 */
4079 if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4080 /* Have signal; wait for the link to come up. */
4081
4082 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4083 /*
4084 * Reset the link, and let autonegotiation do its thing
4085 */
4086 sc->sc_ctrl |= CTRL_LRST;
4087 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4088 delay(1000);
4089 sc->sc_ctrl &= ~CTRL_LRST;
4090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4091 delay(1000);
4092 }
4093
4094 for (i = 0; i < 50; i++) {
4095 delay(10000);
4096 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4097 break;
4098 }
4099
4100 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4101 sc->sc_dev.dv_xname,i));
4102
4103 status = CSR_READ(sc, WMREG_STATUS);
4104 DPRINTF(WM_DEBUG_LINK,
4105 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4106 sc->sc_dev.dv_xname,status, STATUS_LU));
4107 if (status & STATUS_LU) {
4108 /* Link is up. */
4109 DPRINTF(WM_DEBUG_LINK,
4110 ("%s: LINK: set media -> link up %s\n",
4111 sc->sc_dev.dv_xname,
4112 (status & STATUS_FD) ? "FDX" : "HDX"));
4113 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4114 sc->sc_fcrtl &= ~FCRTL_XONE;
4115 if (status & STATUS_FD)
4116 sc->sc_tctl |=
4117 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4118 else
4119 sc->sc_tctl |=
4120 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4121 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4122 sc->sc_fcrtl |= FCRTL_XONE;
4123 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4124 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4125 WMREG_OLD_FCRTL : WMREG_FCRTL,
4126 sc->sc_fcrtl);
4127 sc->sc_tbi_linkup = 1;
4128 } else {
4129 /* Link is down. */
4130 DPRINTF(WM_DEBUG_LINK,
4131 ("%s: LINK: set media -> link down\n",
4132 sc->sc_dev.dv_xname));
4133 sc->sc_tbi_linkup = 0;
4134 }
4135 } else {
4136 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4137 sc->sc_dev.dv_xname));
4138 sc->sc_tbi_linkup = 0;
4139 }
4140
4141 wm_tbi_set_linkled(sc);
4142
4143 return (0);
4144 }
4145
4146 /*
4147 * wm_tbi_set_linkled:
4148 *
4149 * Update the link LED on 1000BASE-X devices.
4150 */
4151 static void
4152 wm_tbi_set_linkled(struct wm_softc *sc)
4153 {
4154
4155 if (sc->sc_tbi_linkup)
4156 sc->sc_ctrl |= CTRL_SWDPIN(0);
4157 else
4158 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4159
4160 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4161 }
4162
4163 /*
4164 * wm_tbi_check_link:
4165 *
4166 * Check the link on 1000BASE-X devices.
4167 */
4168 static void
4169 wm_tbi_check_link(struct wm_softc *sc)
4170 {
4171 uint32_t rxcw, ctrl, status;
4172
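	/*
	 * sc_tbi_anstate is armed when a link change is noticed and
	 * counted down here, giving autonegotiation a chance to settle
	 * before the link state is sampled; zero means no check is
	 * pending.
	 */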
4173 if (sc->sc_tbi_anstate == 0)
4174 return;
4175 else if (sc->sc_tbi_anstate > 1) {
4176 DPRINTF(WM_DEBUG_LINK,
4177 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
4178 sc->sc_tbi_anstate));
4179 sc->sc_tbi_anstate--;
4180 return;
4181 }
4182
4183 sc->sc_tbi_anstate = 0;
4184
4185 rxcw = CSR_READ(sc, WMREG_RXCW);
4186 ctrl = CSR_READ(sc, WMREG_CTRL);
4187 status = CSR_READ(sc, WMREG_STATUS);
4188
4189 if ((status & STATUS_LU) == 0) {
4190 DPRINTF(WM_DEBUG_LINK,
4191 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
4192 sc->sc_tbi_linkup = 0;
4193 } else {
4194 DPRINTF(WM_DEBUG_LINK,
4195 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
4196 (status & STATUS_FD) ? "FDX" : "HDX"));
4197 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4198 sc->sc_fcrtl &= ~FCRTL_XONE;
4199 if (status & STATUS_FD)
4200 sc->sc_tctl |=
4201 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4202 else
4203 sc->sc_tctl |=
4204 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4205 if (ctrl & CTRL_TFCE)
4206 sc->sc_fcrtl |= FCRTL_XONE;
4207 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4208 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4209 WMREG_OLD_FCRTL : WMREG_FCRTL,
4210 sc->sc_fcrtl);
4211 sc->sc_tbi_linkup = 1;
4212 }
4213
4214 wm_tbi_set_linkled(sc);
4215 }
4216
4217 /*
4218 * wm_gmii_reset:
4219 *
4220 * Reset the PHY.
4221 */
4222 static void
4223 wm_gmii_reset(struct wm_softc *sc)
4224 {
4225 uint32_t reg;
4226 int func = 0; /* XXX gcc */
4227
4228 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4229 if (wm_get_swfwhw_semaphore(sc))
4230 return;
4231 }
4232 if (sc->sc_type == WM_T_80003) {
4233 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4234 if (wm_get_swfw_semaphore(sc,
4235 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4236 return;
4237 }
4238 if (sc->sc_type >= WM_T_82544) {
4239 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4240 delay(20000);
4241
4242 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4243 delay(20000);
4244 } else {
4245 /*
4246 * With 82543, we need to force speed and duplex on the MAC
4247 * equal to what the PHY speed and duplex configuration is.
4248 * In addition, we need to perform a hardware reset on the PHY
4249 * to take it out of reset.
4250 */
4251 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4252 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4253
4254 /* The PHY reset pin is active-low. */
4255 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4256 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4257 CTRL_EXT_SWDPIN(4));
4258 reg |= CTRL_EXT_SWDPIO(4);
4259
4260 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4261 delay(10);
4262
4263 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4264 delay(10000);
4265
4266 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4267 delay(10);
4268 #if 0
4269 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4270 #endif
4271 }
4272 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4273 wm_put_swfwhw_semaphore(sc);
4274 if (sc->sc_type == WM_T_80003)
4275 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4276 }
4277
4278 /*
4279 * wm_gmii_mediainit:
4280 *
4281 * Initialize media for use on 1000BASE-T devices.
4282 */
4283 static void
4284 wm_gmii_mediainit(struct wm_softc *sc)
4285 {
4286 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4287
4288 /* We have MII. */
4289 sc->sc_flags |= WM_F_HAS_MII;
4290
4291 if (sc->sc_type >= WM_T_80003)
4292 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4293 else
4294 sc->sc_tipg = TIPG_1000T_DFLT;
4295
4296 /*
4297 * Let the chip set speed/duplex on its own based on
4298 * signals from the PHY.
4299 * XXXbouyer - I'm not sure this is right for the 80003,
4300 * the em driver only sets CTRL_SLU here - but it seems to work.
4301 */
4302 sc->sc_ctrl |= CTRL_SLU;
4303 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4304
4305 /* Initialize our media structures and probe the GMII. */
4306 sc->sc_mii.mii_ifp = ifp;
4307
4308 if (sc->sc_type >= WM_T_80003) {
4309 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4310 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4311 } else if (sc->sc_type >= WM_T_82544) {
4312 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4313 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4314 } else {
4315 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4316 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4317 }
4318 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4319
4320 wm_gmii_reset(sc);
4321
4322 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4323 wm_gmii_mediastatus);
4324
4325 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4326 MII_OFFSET_ANY, MIIF_DOPAUSE);
4327 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4328 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4329 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4330 } else
4331 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4332 }
4333
4334 /*
4335 * wm_gmii_mediastatus: [ifmedia interface function]
4336 *
4337 * Get the current interface media status on a 1000BASE-T device.
4338 */
4339 static void
4340 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4341 {
4342 struct wm_softc *sc = ifp->if_softc;
4343
4344 mii_pollstat(&sc->sc_mii);
4345 ifmr->ifm_status = sc->sc_mii.mii_media_status;
4346 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
4347 sc->sc_flowflags;
4348 }
4349
4350 /*
4351 * wm_gmii_mediachange: [ifmedia interface function]
4352 *
4353 * Set hardware to newly-selected media on a 1000BASE-T device.
4354 */
4355 static int
4356 wm_gmii_mediachange(struct ifnet *ifp)
4357 {
4358 struct wm_softc *sc = ifp->if_softc;
4359 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4360
4361 if (ifp->if_flags & IFF_UP) {
4362 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4363 sc->sc_ctrl |= CTRL_SLU;
4364 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4365 || (sc->sc_type > WM_T_82543)) {
4366 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4367 } else {
4368 sc->sc_ctrl &= ~CTRL_ASDE;
4369 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4370 if (ife->ifm_media & IFM_FDX)
4371 sc->sc_ctrl |= CTRL_FD;
4372 switch(IFM_SUBTYPE(ife->ifm_media)) {
4373 case IFM_10_T:
4374 sc->sc_ctrl |= CTRL_SPEED_10;
4375 break;
4376 case IFM_100_TX:
4377 sc->sc_ctrl |= CTRL_SPEED_100;
4378 break;
4379 case IFM_1000_T:
4380 sc->sc_ctrl |= CTRL_SPEED_1000;
4381 break;
4382 default:
4383 panic("wm_gmii_mediachange: bad media 0x%x",
4384 ife->ifm_media);
4385 }
4386 }
4387 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4388 if (sc->sc_type <= WM_T_82543)
4389 wm_gmii_reset(sc);
4390 mii_mediachg(&sc->sc_mii);
4391 }
4392 return (0);
4393 }
4394
4395 #define MDI_IO CTRL_SWDPIN(2)
4396 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4397 #define MDI_CLK CTRL_SWDPIN(3)
4398
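/*
 * i82543_mii_sendbits:
 *
 *	Clock out `nbits' bits of `data', most significant bit first,
 *	over the bit-banged MDIO interface implemented on the 82543's
 *	software-definable pins.
 */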
4399 static void
4400 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4401 {
4402 uint32_t i, v;
4403
4404 v = CSR_READ(sc, WMREG_CTRL);
4405 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4406 v |= MDI_DIR | CTRL_SWDPIO(3);
4407
4408 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4409 if (data & i)
4410 v |= MDI_IO;
4411 else
4412 v &= ~MDI_IO;
4413 CSR_WRITE(sc, WMREG_CTRL, v);
4414 delay(10);
4415 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4416 delay(10);
4417 CSR_WRITE(sc, WMREG_CTRL, v);
4418 delay(10);
4419 }
4420 }
4421
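/*
 * i82543_mii_recvbits:
 *
 *	Turn the MDIO line around and clock in a 16-bit value, most
 *	significant bit first.
 */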
4422 static uint32_t
4423 i82543_mii_recvbits(struct wm_softc *sc)
4424 {
4425 uint32_t v, i, data = 0;
4426
4427 v = CSR_READ(sc, WMREG_CTRL);
4428 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4429 v |= CTRL_SWDPIO(3);
4430
4431 CSR_WRITE(sc, WMREG_CTRL, v);
4432 delay(10);
4433 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4434 delay(10);
4435 CSR_WRITE(sc, WMREG_CTRL, v);
4436 delay(10);
4437
4438 for (i = 0; i < 16; i++) {
4439 data <<= 1;
4440 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4441 delay(10);
4442 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4443 data |= 1;
4444 CSR_WRITE(sc, WMREG_CTRL, v);
4445 delay(10);
4446 }
4447
4448 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4449 delay(10);
4450 CSR_WRITE(sc, WMREG_CTRL, v);
4451 delay(10);
4452
4453 return (data);
4454 }
4455
4456 #undef MDI_IO
4457 #undef MDI_DIR
4458 #undef MDI_CLK
4459
4460 /*
4461 * wm_gmii_i82543_readreg: [mii interface function]
4462 *
4463 * Read a PHY register on the GMII (i82543 version).
4464 */
4465 static int
4466 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
4467 {
4468 struct wm_softc *sc = (void *) self;
4469 int rv;
4470
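	/*
	 * Standard IEEE 802.3 clause 22 framing: a preamble of 32 ones,
	 * then the start (01) and read (10) codes, the PHY and register
	 * addresses, and finally 16 data bits clocked back in.
	 */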
4471 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4472 i82543_mii_sendbits(sc, reg | (phy << 5) |
4473 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4474 rv = i82543_mii_recvbits(sc) & 0xffff;
4475
4476 DPRINTF(WM_DEBUG_GMII,
4477 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4478 sc->sc_dev.dv_xname, phy, reg, rv));
4479
4480 return (rv);
4481 }
4482
4483 /*
4484 * wm_gmii_i82543_writereg: [mii interface function]
4485 *
4486 * Write a PHY register on the GMII (i82543 version).
4487 */
4488 static void
4489 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
4490 {
4491 struct wm_softc *sc = (void *) self;
4492
4493 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4494 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4495 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4496 (MII_COMMAND_START << 30), 32);
4497 }
4498
4499 /*
4500 * wm_gmii_i82544_readreg: [mii interface function]
4501 *
4502 * Read a PHY register on the GMII.
4503 */
4504 static int
4505 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
4506 {
4507 struct wm_softc *sc = (void *) self;
4508 uint32_t mdic = 0;
4509 int i, rv;
4510
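	/*
	 * The MDIC register runs the MDIO cycle in hardware: write the
	 * opcode plus the PHY and register addresses, then poll (for up
	 * to ~3.2ms) for MDIC_READY.
	 */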
4511 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4512 MDIC_REGADD(reg));
4513
4514 for (i = 0; i < 320; i++) {
4515 mdic = CSR_READ(sc, WMREG_MDIC);
4516 if (mdic & MDIC_READY)
4517 break;
4518 delay(10);
4519 }
4520
4521 if ((mdic & MDIC_READY) == 0) {
4522 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4523 sc->sc_dev.dv_xname, phy, reg);
4524 rv = 0;
4525 } else if (mdic & MDIC_E) {
4526 #if 0 /* This is normal if no PHY is present. */
4527 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4528 sc->sc_dev.dv_xname, phy, reg);
4529 #endif
4530 rv = 0;
4531 } else {
4532 rv = MDIC_DATA(mdic);
4533 if (rv == 0xffff)
4534 rv = 0;
4535 }
4536
4537 return (rv);
4538 }
4539
4540 /*
4541 * wm_gmii_i82544_writereg: [mii interface function]
4542 *
4543 * Write a PHY register on the GMII.
4544 */
4545 static void
4546 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
4547 {
4548 struct wm_softc *sc = (void *) self;
4549 uint32_t mdic = 0;
4550 int i;
4551
4552 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4553 MDIC_REGADD(reg) | MDIC_DATA(val));
4554
4555 for (i = 0; i < 320; i++) {
4556 mdic = CSR_READ(sc, WMREG_MDIC);
4557 if (mdic & MDIC_READY)
4558 break;
4559 delay(10);
4560 }
4561
4562 if ((mdic & MDIC_READY) == 0)
4563 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4564 sc->sc_dev.dv_xname, phy, reg);
4565 else if (mdic & MDIC_E)
4566 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4567 sc->sc_dev.dv_xname, phy, reg);
4568 }
4569
4570 /*
4571 * wm_gmii_i80003_readreg: [mii interface function]
4572 *
 *	Read a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource...
4576 */
4577 static int
4578 wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
4579 {
4580 struct wm_softc *sc = (void *) self;
4581 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4582 int rv;
4583
	if (phy != 1)	/* only one PHY on Kumeran bus */
4585 return 0;
4586
4587 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4588 return 0;
4589
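	/*
	 * GG82563 registers are paged.  The page (the upper bits of the
	 * register number) must be written to a page-select register
	 * first; registers at or above GG82563_MIN_ALT_REG use an
	 * alternate page-select register.
	 */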
4590 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4591 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4592 reg >> GG82563_PAGE_SHIFT);
4593 } else {
4594 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4595 reg >> GG82563_PAGE_SHIFT);
4596 }
4597
4598 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4599 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4600 return (rv);
4601 }
4602
4603 /*
4604 * wm_gmii_i80003_writereg: [mii interface function]
4605 *
 *	Write a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource...
4609 */
4610 static void
4611 wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
4612 {
4613 struct wm_softc *sc = (void *) self;
4614 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4615
	if (phy != 1)	/* only one PHY on Kumeran bus */
4617 return;
4618
4619 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4620 return;
4621
4622 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4623 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4624 reg >> GG82563_PAGE_SHIFT);
4625 } else {
4626 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4627 reg >> GG82563_PAGE_SHIFT);
4628 }
4629
4630 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4631 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4632 }
4633
4634 /*
4635 * wm_gmii_statchg: [mii interface function]
4636 *
4637 * Callback from MII layer when media changes.
4638 */
4639 static void
4640 wm_gmii_statchg(struct device *self)
4641 {
4642 struct wm_softc *sc = (void *) self;
4643 struct mii_data *mii = &sc->sc_mii;
4644
4645 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4646 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4647 sc->sc_fcrtl &= ~FCRTL_XONE;
4648
4649 /*
4650 * Get flow control negotiation result.
4651 */
4652 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4653 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4654 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4655 mii->mii_media_active &= ~IFM_ETH_FMASK;
4656 }
4657
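	/*
	 * Apply the negotiated flow-control state: CTRL_TFCE and
	 * CTRL_RFCE enable sending and honoring PAUSE frames
	 * respectively, and FCRTL_XONE lets the chip send XON once the
	 * receive FIFO drains below the low-water mark.
	 */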
4658 if (sc->sc_flowflags & IFM_FLOW) {
4659 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4660 sc->sc_ctrl |= CTRL_TFCE;
4661 sc->sc_fcrtl |= FCRTL_XONE;
4662 }
4663 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4664 sc->sc_ctrl |= CTRL_RFCE;
4665 }
4666
4667 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4668 DPRINTF(WM_DEBUG_LINK,
4669 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
4670 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4671 } else {
4672 DPRINTF(WM_DEBUG_LINK,
4673 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
4674 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4675 }
4676
4677 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4678 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4679 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4680 : WMREG_FCRTL, sc->sc_fcrtl);
4681 if (sc->sc_type >= WM_T_80003) {
4682 switch(IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4683 case IFM_1000_T:
4684 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4685 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4686 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4687 break;
4688 default:
4689 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4690 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4691 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4692 break;
4693 }
4694 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4695 }
4696 }
4697
4698 /*
4699 * wm_kmrn_i80003_readreg:
4700 *
 *	Read a Kumeran interface register.
4702 */
4703 static int
4704 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4705 {
4706 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4707 int rv;
4708
4709 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4710 return 0;
4711
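	/*
	 * A Kumeran read is done by writing the register offset with
	 * the read-enable bit set, waiting briefly, and then reading
	 * the data back out of the same register.
	 */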
4712 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4713 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4714 KUMCTRLSTA_REN);
4715 delay(2);
4716
4717 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4718 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4719 return (rv);
4720 }
4721
4722 /*
4723 * wm_kmrn_i80003_writereg:
4724 *
 *	Write a Kumeran interface register.
4726 */
4727 static void
4728 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4729 {
4730 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4731
4732 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4733 return;
4734
4735 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4736 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4737 (val & KUMCTRLSTA_MASK));
4738 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4739 }
4740
4741 static int
4742 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4743 {
4744 uint32_t eecd = 0;
4745
4746 if (sc->sc_type == WM_T_82573) {
4747 eecd = CSR_READ(sc, WMREG_EECD);
4748
4749 /* Isolate bits 15 & 16 */
4750 eecd = ((eecd >> 15) & 0x03);
4751
4752 /* If both bits are set, device is Flash type */
4753 if (eecd == 0x03) {
4754 return 0;
4755 }
4756 }
4757 return 1;
4758 }
4759
4760 static int
4761 wm_get_swsm_semaphore(struct wm_softc *sc)
4762 {
4763 int32_t timeout;
4764 uint32_t swsm;
4765
4766 /* Get the FW semaphore. */
4767 timeout = 1000 + 1; /* XXX */
4768 while (timeout) {
4769 swsm = CSR_READ(sc, WMREG_SWSM);
4770 swsm |= SWSM_SWESMBI;
4771 CSR_WRITE(sc, WMREG_SWSM, swsm);
4772 /* if we managed to set the bit we got the semaphore. */
4773 swsm = CSR_READ(sc, WMREG_SWSM);
4774 if (swsm & SWSM_SWESMBI)
4775 break;
4776
4777 delay(50);
4778 timeout--;
4779 }
4780
4781 if (timeout == 0) {
		aprint_error("%s: could not acquire SWSM semaphore\n",
		    sc->sc_dev.dv_xname);
4784 /* Release semaphores */
4785 wm_put_swsm_semaphore(sc);
4786 return 1;
4787 }
4788 return 0;
4789 }
4790
4791 static void
4792 wm_put_swsm_semaphore(struct wm_softc *sc)
4793 {
4794 uint32_t swsm;
4795
4796 swsm = CSR_READ(sc, WMREG_SWSM);
4797 swsm &= ~(SWSM_SWESMBI);
4798 CSR_WRITE(sc, WMREG_SWSM, swsm);
4799 }
4800
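/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire one of the per-resource software/firmware semaphores
 *	in SW_FW_SYNC: take the global SWSM semaphore (where the
 *	hardware has one), make sure neither the software nor the
 *	firmware owns the resource, set our software bit, and release
 *	the SWSM semaphore again.  Retried for up to a second before
 *	giving up.
 */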
4801 static int
4802 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4803 {
4804 uint32_t swfw_sync;
4805 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4806 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
4810 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4811 if (wm_get_swsm_semaphore(sc))
4812 return 1;
4813 }
4814 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4815 if ((swfw_sync & (swmask | fwmask)) == 0) {
4816 swfw_sync |= swmask;
4817 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4818 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4819 wm_put_swsm_semaphore(sc);
4820 return 0;
4821 }
4822 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4823 wm_put_swsm_semaphore(sc);
4824 delay(5000);
4825 }
4826 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4827 sc->sc_dev.dv_xname, mask, swfw_sync);
4828 return 1;
4829 }
4830
4831 static void
4832 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4833 {
4834 uint32_t swfw_sync;
4835
4836 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4837 while (wm_get_swsm_semaphore(sc) != 0)
4838 continue;
4839 }
4840 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4841 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4842 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4843 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4844 wm_put_swsm_semaphore(sc);
4845 }
4846
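/*
 * wm_get_swfwhw_semaphore:
 *
 *	Acquire the ICH8/ICH9 software flag in EXTCNFCTR by setting the
 *	flag bit and reading it back; the bit only sticks once the
 *	hardware grants ownership.  Retried for up to a second.
 */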
4847 static int
4848 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4849 {
4850 uint32_t ext_ctrl;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
4854 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4855 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4856 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4857
4858 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4859 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4860 return 0;
4861 delay(5000);
4862 }
	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4864 sc->sc_dev.dv_xname, ext_ctrl);
4865 return 1;
4866 }
4867
4868 static void
4869 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4870 {
4871 uint32_t ext_ctrl;
4872 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4873 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4874 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4875 }
4876
4877 /******************************************************************************
4878 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
4879 * register.
4880 *
4881 * sc - Struct containing variables accessed by shared code
4882 * offset - offset of word in the EEPROM to read
4883 * data - word read from the EEPROM
4884 * words - number of words to read
4885 *****************************************************************************/
4886 static int
4887 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4888 {
4889 int32_t error = 0;
4890 uint32_t flash_bank = 0;
4891 uint32_t act_offset = 0;
4892 uint32_t bank_offset = 0;
4893 uint16_t word = 0;
4894 uint16_t i = 0;
4895
	/*
	 * We need to know which flash bank is currently valid.  This
	 * driver keeps no shadow copy of the NVM, so the bank selection
	 * cannot be cached and must be re-read on every access.
	 */
4901 /* Value of bit 22 corresponds to the flash bank we're on. */
4902 flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4903
	/*
	 * Adjust the offset appropriately if we're on bank 1, converting
	 * the bank size from words to bytes.
	 */
4905 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4906
4907 error = wm_get_swfwhw_semaphore(sc);
4908 if (error)
4909 return error;
4910
4911 for (i = 0; i < words; i++) {
4912 /* The NVM part needs a byte offset, hence * 2 */
4913 act_offset = bank_offset + ((offset + i) * 2);
4914 error = wm_read_ich8_word(sc, act_offset, &word);
4915 if (error)
4916 break;
4917 data[i] = word;
4918 }
4919
4920 wm_put_swfwhw_semaphore(sc);
4921 return error;
4922 }
4923
4924 /******************************************************************************
4925 * This function does initial flash setup so that a new read/write/erase cycle
4926 * can be started.
4927 *
4928 * sc - The pointer to the hw structure
4929 ****************************************************************************/
4930 static int32_t
4931 wm_ich8_cycle_init(struct wm_softc *sc)
4932 {
4933 uint16_t hsfsts;
4934 int32_t error = 1;
4935 int32_t i = 0;
4936
4937 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4938
	/* Check the Flash Descriptor Valid bit in Hw status; bail if clear. */
4940 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4941 return error;
4942 }
4943
	/* Clear FCERR and DAEL in Hw status by writing a 1 to each. */
4946 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4947
4948 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4949
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it reads as 1 after
	 * a hardware reset, which could then be used to tell whether a
	 * cycle is in progress or has completed.  We should also have
	 * some software semaphore mechanism guarding FDONE or the
	 * cycle-in-progress bit so that two threads don't start a cycle
	 * at the same time.
	 */
4958
4959 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4960 /* There is no cycle running at present, so we can start a cycle */
4961 /* Begin by setting Flash Cycle Done. */
4962 hsfsts |= HSFSTS_DONE;
4963 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4964 error = 0;
4965 } else {
		/*
		 * Otherwise poll for some time so the current cycle has
		 * a chance to end before giving up.
		 */
4968 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
4969 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4970 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4971 error = 0;
4972 break;
4973 }
4974 delay(1);
4975 }
4976 if (error == 0) {
			/*
			 * The previous cycle completed within the timeout;
			 * now set Flash Cycle Done.
			 */
4979 hsfsts |= HSFSTS_DONE;
4980 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4981 }
4982 }
4983 return error;
4984 }
4985
4986 /******************************************************************************
4987 * This function starts a flash cycle and waits for its completion
4988 *
4989 * sc - The pointer to the hw structure
4990 ****************************************************************************/
4991 static int32_t
4992 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
4993 {
4994 uint16_t hsflctl;
4995 uint16_t hsfsts;
4996 int32_t error = 1;
4997 uint32_t i = 0;
4998
4999 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5000 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5001 hsflctl |= HSFCTL_GO;
5002 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5003
5004 /* wait till FDONE bit is set to 1 */
5005 do {
5006 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5007 if (hsfsts & HSFSTS_DONE)
5008 break;
5009 delay(1);
5010 i++;
5011 } while (i < timeout);
	/* The cycle succeeded if FDONE is set and no error was flagged. */
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
		error = 0;
	}
5015 return error;
5016 }
5017
5018 /******************************************************************************
5019 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5020 *
5021 * sc - The pointer to the hw structure
5022 * index - The index of the byte or word to read.
5023 * size - Size of data to read, 1=byte 2=word
5024 * data - Pointer to the word to store the value read.
5025 *****************************************************************************/
5026 static int32_t
5027 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5028 uint32_t size, uint16_t* data)
5029 {
5030 uint16_t hsfsts;
5031 uint16_t hsflctl;
5032 uint32_t flash_linear_address;
5033 uint32_t flash_data = 0;
5034 int32_t error = 1;
5035 int32_t count = 0;
5036
	if (size < 1 || size > 2 || data == NULL ||
	    index > ICH_FLASH_LINEAR_ADDR_MASK)
		return error;
5040
5041 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5042 sc->sc_ich8_flash_base;
5043
5044 do {
5045 delay(1);
		/* Make sure the flash interface is ready for a new cycle. */
5047 error = wm_ich8_cycle_init(sc);
5048 if (error)
5049 break;
5050
5051 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5052 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
5053 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5054 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5055 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5056
		/*
		 * Write the last 24 bits of the index into the Flash
		 * Linear Address field of the Flash Address register.
		 */
		/* TODO: TBD maybe check the index against the size of flash */
5060
5061 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5062
5063 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5064
		/*
		 * If FCERR is set, clear it and retry the whole sequence
		 * a few more times; otherwise read in (shift in) Flash
		 * Data0, least significant byte first.
		 */
5068 if (error == 0) {
5069 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5070 if (size == 1) {
5071 *data = (uint8_t)(flash_data & 0x000000FF);
5072 } else if (size == 2) {
5073 *data = (uint16_t)(flash_data & 0x0000FFFF);
5074 }
5075 break;
5076 } else {
5077 /* If we've gotten here, then things are probably completely hosed,
5078 * but if the error condition is detected, it won't hurt to give
5079 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5080 */
5081 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5082 if (hsfsts & HSFSTS_ERR) {
5083 /* Repeat for some time before giving up. */
5084 continue;
5085 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5086 break;
5087 }
5088 }
5089 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5090
5091 return error;
5092 }
5093
5094 #if 0
5095 /******************************************************************************
5096 * Reads a single byte from the NVM using the ICH8 flash access registers.
5097 *
5098 * sc - pointer to wm_hw structure
5099 * index - The index of the byte to read.
5100 * data - Pointer to a byte to store the value read.
5101 *****************************************************************************/
5102 static int32_t
5103 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5104 {
5105 int32_t status;
5106 uint16_t word = 0;
5107
5108 status = wm_read_ich8_data(sc, index, 1, &word);
5109 if (status == 0) {
5110 *data = (uint8_t)word;
5111 }
5112
5113 return status;
5114 }
5115 #endif
5116
5117 /******************************************************************************
5118 * Reads a word from the NVM using the ICH8 flash access registers.
5119 *
5120 * sc - pointer to wm_hw structure
5121 * index - The starting byte index of the word to read.
5122 * data - Pointer to a word to store the value read.
5123 *****************************************************************************/
5124 static int32_t
5125 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5126 {
5127 int32_t status;
5128
5129 status = wm_read_ich8_data(sc, index, 2, data);
5130 return status;
5131 }
5132