/*	$NetBSD: if_wm.c,v 1.147 2007/10/19 12:00:49 ad Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.147 2007/10/19 12:00:49 ad Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
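
/*
 * Usage sketch (illustrative, not part of the driver): DPRINTF takes the
 * entire printf argument list as a single parenthesized argument, so
 * calls need double parentheses, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", sc->sc_dev.dv_xname));
 *
 * With WM_DEBUG undefined the macro expands to nothing.
 */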

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
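
/*
 * Example of the ring arithmetic above (illustrative): the descriptor
 * and job counts are powers of two, so "& WM_NTXDESC_MASK(sc)" is a
 * cheap modulo; with WM_NTXDESC(sc) == 4096, WM_NEXTTX(sc, 4095)
 * computes (4096 & 4095) == 0, wrapping to the start of the ring.
 */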

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
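
/*
 * Checking the jumbo math above (illustrative): five 2K (MCLBYTES)
 * buffers provide 10240 bytes per chained packet, enough for a ~9K
 * jumbo frame, and 256 descriptors / 5 is roughly 51, hence "room
 * for 50 jumbo packets".
 */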

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
	WM_T_ICH8,			/* ICH8 LAN */
	WM_T_ICH9,			/* ICH9 LAN */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
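
/*
 * Note on the chain macros above: sc_rxtailp always points at the
 * m_next field of the last mbuf (or at sc_rxhead when the chain is
 * empty), so linking is O(1) with no empty-chain special case.
 * Sketch (illustrative):
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead == NULL
 *	WM_RXCHAIN_LINK(sc, m1);	sc_rxhead == m1
 *	WM_RXCHAIN_LINK(sc, m2);	m1->m_next == m2, sc_rxtail == m2
 */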

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */
#define	WM_F_SWFWHW_SYNC	0x2000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
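
/*
 * Note (assumption about intent): CSR_WRITE_FLUSH reads STATUS purely
 * for its side effect; on PCI, a read forces any posted writes out to
 * the device before the CPU proceeds.
 */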

#define	ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define	ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
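
/*
 * Example (illustrative): with a 64-bit bus_addr_t, a descriptor base
 * of 0x000123456789a000 splits into LO 0x6789a000 and HI 0x00012345;
 * on 32-bit platforms HI is always 0.
 */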

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
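
/*
 * Example of the wrap handling above (illustrative): with a 4096-entry
 * ring, WM_CDTXSYNC(sc, 4094, 4, ops) first syncs descriptors
 * 4094-4095, then descriptors 0-1.
 */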

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
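
/*
 * Arithmetic behind the align tweak (illustrative): an Ethernet header
 * is 14 bytes, so starting the frame 2 bytes into the buffer places
 * the IP header at offset 16, a 4-byte boundary, which strict-alignment
 * CPUs require.
 */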

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		    uint32_t, uint16_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *sc, uint32_t, uint16_t *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}
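
/*
 * Usage sketch (illustrative): wm_set_dma_addr(&__rxd->wrx_addr, pa)
 * stores a little-endian 64-bit bus address.  The sizeof test is a
 * compile-time constant, so the dead branch is optimized away.
 */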

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch, 0);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it
			 * hasn't been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		/* ICH8/ICH9 use the SWFWHW semaphore, not this one. */
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9))
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
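		/*
		 * Decoding note (illustrative): MMRBC is encoded as a
		 * power-of-two multiple of 512 bytes, so "512 << bytecnt"
		 * above turns the 2-bit field into a byte count
		 * (0 -> 512, 3 -> 4096).
		 */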
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (void **)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
		uint32_t flash_size;
		sc->sc_flags |= WM_F_SWFWHW_SYNC | WM_F_EEPROM_FLASH;
		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
			printf("%s: can't map FLASH registers\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		flash_size = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
		sc->sc_ich8_flash_base = (flash_size & ICH_GFPREG_BASE_MASK) *
		    ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size =
		    ((flash_size >> 16) & ICH_GFPREG_BASE_MASK) + 1;
		sc->sc_ich8_flash_bank_size -=
		    (flash_size & ICH_GFPREG_BASE_MASK);
		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
	} else if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
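	/*
	 * Example of the GFPREG math in the ICH8/ICH9 branch above
	 * (illustrative, assuming the field layout in if_wmreg.h):
	 * GFPREG holds the first and last sectors of the flash region;
	 * the difference, scaled by ICH_FLASH_SECTOR_SIZE, is the region
	 * size in bytes, and dividing by 2 * sizeof(uint16_t) yields the
	 * size of one of the two banks in 16-bit words.
	 */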

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum. If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}
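	/*
	 * Layout note (illustrative): the EEPROM stores the MAC address
	 * as three little-endian 16-bit words, so a MAC beginning
	 * 00:11:... is read as myea[0] == 0x1100, giving
	 * enaddr[0] == 0x00 and enaddr[1] == 0x11.
	 */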

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
	    || sc->sc_type == WM_T_82573) {
		/* STATUS_TBIMODE reserved/reused, can't rely on it */
		wm_gmii_mediainit(sc);
	} else if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
	 * TCP segmentation offload.
	 */
	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
		ifp->if_capabilities |= IFCAP_TSOv4;
	}

	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |= IFCAP_TSOv6;
	}

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txfifo_stall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum6");

	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso");
	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtso6");
	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtsopain");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);

	sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
	    wm_powerhook, sc);
	if (sc->sc_powerhook == NULL)
		aprint_error("%s: can't establish powerhook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

static void
wm_powerhook(int why, void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;

	switch (why) {
	case PWR_SOFTSUSPEND:
		wm_shutdown(sc);
		break;
	case PWR_SOFTRESUME:
		ifp->if_flags &= ~IFF_RUNNING;
		wm_init(ifp);
		if (ifp->if_flags & IFF_RUNNING)
			wm_start(ifp);
		break;
	case PWR_SUSPEND:
		pci_conf_capture(pc, tag, &sc->sc_pciconf);
		break;
	case PWR_RESUME:
		pci_conf_restore(pc, tag, &sc->sc_pciconf);
		break;
	}

	return;
}
1704
1705 /*
1706 * wm_tx_offload:
1707 *
1708 * Set up TCP/IP checksumming parameters for the
1709 * specified packet.
1710 */
1711 static int
1712 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1713 uint8_t *fieldsp)
1714 {
1715 struct mbuf *m0 = txs->txs_mbuf;
1716 struct livengood_tcpip_ctxdesc *t;
1717 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1718 uint32_t ipcse;
1719 struct ether_header *eh;
1720 int offset, iphl;
1721 uint8_t fields;
1722
1723 /*
1724 * XXX It would be nice if the mbuf pkthdr had offset
1725 * fields for the protocol headers.
1726 */
1727
1728 eh = mtod(m0, struct ether_header *);
1729 	switch (ntohs(eh->ether_type)) {
1730 case ETHERTYPE_IP:
1731 case ETHERTYPE_IPV6:
1732 offset = ETHER_HDR_LEN;
1733 break;
1734
1735 case ETHERTYPE_VLAN:
1736 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1737 break;
1738
1739 default:
1740 /*
1741 * Don't support this protocol or encapsulation.
1742 */
1743 *fieldsp = 0;
1744 *cmdp = 0;
1745 return (0);
1746 }
1747
1748 if ((m0->m_pkthdr.csum_flags &
1749 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1750 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1751 } else {
1752 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1753 }
1754 ipcse = offset + iphl - 1;
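	/*
	 * E.g. for a plain IPv4 frame: offset = ETHER_HDR_LEN (14) and
	 * iphl = 20, so ipcse = 33, the offset of the last byte covered
	 * by the IP header checksum.
	 */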
1755
1756 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1757 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1758 seg = 0;
1759 fields = 0;
1760
1761 if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
1762 int hlen = offset + iphl;
1763 bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1764
1765 if (__predict_false(m0->m_len <
1766 (hlen + sizeof(struct tcphdr)))) {
1767 /*
1768 * TCP/IP headers are not in the first mbuf; we need
1769 * to do this the slow and painful way. Let's just
1770 * hope this doesn't happen very often.
1771 */
1772 struct tcphdr th;
1773
1774 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1775
1776 m_copydata(m0, hlen, sizeof(th), &th);
1777 if (v4) {
1778 struct ip ip;
1779
1780 m_copydata(m0, offset, sizeof(ip), &ip);
1781 ip.ip_len = 0;
1782 m_copyback(m0,
1783 offset + offsetof(struct ip, ip_len),
1784 sizeof(ip.ip_len), &ip.ip_len);
1785 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1786 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1787 } else {
1788 struct ip6_hdr ip6;
1789
1790 m_copydata(m0, offset, sizeof(ip6), &ip6);
1791 ip6.ip6_plen = 0;
1792 m_copyback(m0,
1793 offset + offsetof(struct ip6_hdr, ip6_plen),
1794 sizeof(ip6.ip6_plen), &ip6.ip6_plen);
1795 th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
1796 &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
1797 }
1798 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1799 sizeof(th.th_sum), &th.th_sum);
1800
1801 hlen += th.th_off << 2;
1802 } else {
1803 /*
1804 * TCP/IP headers are in the first mbuf; we can do
1805 * this the easy way.
1806 */
1807 struct tcphdr *th;
1808
1809 if (v4) {
1810 struct ip *ip =
1811 (void *)(mtod(m0, char *) + offset);
1812 th = (void *)(mtod(m0, char *) + hlen);
1813
1814 ip->ip_len = 0;
1815 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1816 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1817 } else {
1818 struct ip6_hdr *ip6 =
1819 (void *)(mtod(m0, char *) + offset);
1820 th = (void *)(mtod(m0, char *) + hlen);
1821
1822 ip6->ip6_plen = 0;
1823 th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
1824 &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
1825 }
1826 hlen += th->th_off << 2;
1827 }
1828
1829 if (v4) {
1830 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1831 cmdlen |= WTX_TCPIP_CMD_IP;
1832 } else {
1833 WM_EVCNT_INCR(&sc->sc_ev_txtso6);
1834 ipcse = 0;
1835 }
1836 cmd |= WTX_TCPIP_CMD_TSE;
1837 cmdlen |= WTX_TCPIP_CMD_TSE |
1838 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1839 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1840 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1841 }
1842
1843 /*
1844 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1845 * offload feature, if we load the context descriptor, we
1846 * MUST provide valid values for IPCSS and TUCSS fields.
1847 */
1848
1849 ipcs = WTX_TCPIP_IPCSS(offset) |
1850 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1851 WTX_TCPIP_IPCSE(ipcse);
1852 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1853 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1854 fields |= WTX_IXSM;
1855 }
1856
1857 offset += iphl;
1858
1859 if (m0->m_pkthdr.csum_flags &
1860 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1861 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1862 fields |= WTX_TXSM;
1863 tucs = WTX_TCPIP_TUCSS(offset) |
1864 WTX_TCPIP_TUCSO(offset +
1865 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1866 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1867 } else if ((m0->m_pkthdr.csum_flags &
1868 (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
1869 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1870 fields |= WTX_TXSM;
1871 tucs = WTX_TCPIP_TUCSS(offset) |
1872 WTX_TCPIP_TUCSO(offset +
1873 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1874 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1875 } else {
1876 /* Just initialize it to a valid TCP context. */
1877 tucs = WTX_TCPIP_TUCSS(offset) |
1878 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1879 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1880 }
1881
1882 /* Fill in the context descriptor. */
1883 t = (struct livengood_tcpip_ctxdesc *)
1884 &sc->sc_txdescs[sc->sc_txnext];
1885 t->tcpip_ipcs = htole32(ipcs);
1886 t->tcpip_tucs = htole32(tucs);
1887 t->tcpip_cmdlen = htole32(cmdlen);
1888 t->tcpip_seg = htole32(seg);
1889 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1890
1891 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1892 txs->txs_ndesc++;
1893
1894 *cmdp = cmd;
1895 *fieldsp = fields;
1896
1897 return (0);
1898 }
1899
1900 static void
1901 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1902 {
1903 struct mbuf *m;
1904 int i;
1905
1906 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
1907 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1908 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1909 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
1910 m->m_data, m->m_len, m->m_flags);
1911 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
1912 i, i == 1 ? "" : "s");
1913 }
1914
1915 /*
1916 * wm_82547_txfifo_stall:
1917 *
1918 * Callout used to wait for the 82547 Tx FIFO to drain,
1919 * reset the FIFO pointers, and restart packet transmission.
1920 */
1921 static void
1922 wm_82547_txfifo_stall(void *arg)
1923 {
1924 struct wm_softc *sc = arg;
1925 int s;
1926
1927 s = splnet();
1928
1929 if (sc->sc_txfifo_stall) {
1930 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1931 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1932 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1933 /*
1934 * Packets have drained. Stop transmitter, reset
1935 * FIFO pointers, restart transmitter, and kick
1936 * the packet queue.
1937 */
1938 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1939 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1940 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1941 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1942 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1943 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1944 CSR_WRITE(sc, WMREG_TCTL, tctl);
1945 CSR_WRITE_FLUSH(sc);
1946
1947 sc->sc_txfifo_head = 0;
1948 sc->sc_txfifo_stall = 0;
1949 wm_start(&sc->sc_ethercom.ec_if);
1950 } else {
1951 /*
1952 * Still waiting for packets to drain; try again in
1953 * another tick.
1954 */
1955 callout_schedule(&sc->sc_txfifo_ch, 1);
1956 }
1957 }
1958
1959 splx(s);
1960 }
1961
1962 /*
1963 * wm_82547_txfifo_bugchk:
1964 *
1965 * Check for bug condition in the 82547 Tx FIFO. We need to
1966 * prevent enqueueing a packet that would wrap around the end
1967 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
1968 *
1969 * We do this by checking the amount of space before the end
1970 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1971 * the Tx FIFO, wait for all remaining packets to drain, reset
1972 * the internal FIFO pointers to the beginning, and restart
1973 * transmission on the interface.
1974 */
1975 #define WM_FIFO_HDR 0x10
1976 #define WM_82547_PAD_LEN 0x3e0
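/*
 * WM_FIFO_HDR is the per-packet header the chip stores in the Tx FIFO;
 * packet lengths are rounded up to this 16-byte granularity.
 * WM_82547_PAD_LEN (0x3e0 == 992 bytes) matches the slop used by the
 * Linux e1000 driver's workaround for the same erratum.
 */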
1977 static int
1978 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1979 {
1980 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1981 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1982
1983 /* Just return if already stalled. */
1984 if (sc->sc_txfifo_stall)
1985 return (1);
1986
1987 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1988 /* Stall only occurs in half-duplex mode. */
1989 goto send_packet;
1990 }
1991
1992 if (len >= WM_82547_PAD_LEN + space) {
1993 sc->sc_txfifo_stall = 1;
1994 callout_schedule(&sc->sc_txfifo_ch, 1);
1995 return (1);
1996 }
1997
1998 send_packet:
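	/*
	 * Keep a running software model of the chip's FIFO write
	 * pointer, wrapping modulo the FIFO size.
	 */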
1999 sc->sc_txfifo_head += len;
2000 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2001 sc->sc_txfifo_head -= sc->sc_txfifo_size;
2002
2003 return (0);
2004 }
2005
2006 /*
2007 * wm_start: [ifnet interface function]
2008 *
2009 * Start packet transmission on the interface.
2010 */
2011 static void
2012 wm_start(struct ifnet *ifp)
2013 {
2014 struct wm_softc *sc = ifp->if_softc;
2015 struct mbuf *m0;
2016 #if 0 /* XXXJRT */
2017 struct m_tag *mtag;
2018 #endif
2019 struct wm_txsoft *txs;
2020 bus_dmamap_t dmamap;
2021 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2022 bus_addr_t curaddr;
2023 bus_size_t seglen, curlen;
2024 uint32_t cksumcmd;
2025 uint8_t cksumfields;
2026
2027 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2028 return;
2029
2030 /*
2031 * Remember the previous number of free descriptors.
2032 */
2033 ofree = sc->sc_txfree;
2034
2035 /*
2036 * Loop through the send queue, setting up transmit descriptors
2037 * until we drain the queue, or use up all available transmit
2038 * descriptors.
2039 */
2040 for (;;) {
2041 /* Grab a packet off the queue. */
2042 IFQ_POLL(&ifp->if_snd, m0);
2043 if (m0 == NULL)
2044 break;
2045
2046 DPRINTF(WM_DEBUG_TX,
2047 ("%s: TX: have packet to transmit: %p\n",
2048 sc->sc_dev.dv_xname, m0));
2049
2050 /* Get a work queue entry. */
2051 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2052 wm_txintr(sc);
2053 if (sc->sc_txsfree == 0) {
2054 DPRINTF(WM_DEBUG_TX,
2055 ("%s: TX: no free job descriptors\n",
2056 sc->sc_dev.dv_xname));
2057 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2058 break;
2059 }
2060 }
2061
2062 txs = &sc->sc_txsoft[sc->sc_txsnext];
2063 dmamap = txs->txs_dmamap;
2064
2065 use_tso = (m0->m_pkthdr.csum_flags &
2066 (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2067
2068 /*
2069 * So says the Linux driver:
2070 * The controller does a simple calculation to make sure
2071 * there is enough room in the FIFO before initiating the
2072 * DMA for each buffer. The calc is:
2073 * 4 = ceil(buffer len / MSS)
2074 * To make sure we don't overrun the FIFO, adjust the max
2075 * buffer len if the MSS drops.
2076 */
2077 dmamap->dm_maxsegsz =
2078 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2079 ? m0->m_pkthdr.segsz << 2
2080 : WTX_MAX_LEN;
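		/*
		 * E.g. with an MSS of 1448 each DMA segment is capped at
		 * 4 * 1448 = 5792 bytes, so the controller's ceil(len / MSS)
		 * estimate described above never exceeds 4.
		 */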
2081
2082 /*
2083 * Load the DMA map. If this fails, the packet either
2084 * didn't fit in the allotted number of segments, or we
2085 * were short on resources. For the too-many-segments
2086 * case, we simply report an error and drop the packet,
2087 * since we can't sanely copy a jumbo packet to a single
2088 * buffer.
2089 */
2090 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2091 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2092 if (error) {
2093 if (error == EFBIG) {
2094 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2095 log(LOG_ERR, "%s: Tx packet consumes too many "
2096 "DMA segments, dropping...\n",
2097 sc->sc_dev.dv_xname);
2098 IFQ_DEQUEUE(&ifp->if_snd, m0);
2099 wm_dump_mbuf_chain(sc, m0);
2100 m_freem(m0);
2101 continue;
2102 }
2103 /*
2104 * Short on resources, just stop for now.
2105 */
2106 DPRINTF(WM_DEBUG_TX,
2107 ("%s: TX: dmamap load failed: %d\n",
2108 sc->sc_dev.dv_xname, error));
2109 break;
2110 }
2111
2112 segs_needed = dmamap->dm_nsegs;
2113 if (use_tso) {
2114 /* For sentinel descriptor; see below. */
2115 segs_needed++;
2116 }
2117
2118 /*
2119 * Ensure we have enough descriptors free to describe
2120 * the packet. Note, we always reserve one descriptor
2121 * at the end of the ring due to the semantics of the
2122 * TDT register, plus one more in the event we need
2123 * to load offload context.
2124 */
2125 if (segs_needed > sc->sc_txfree - 2) {
2126 /*
2127 * Not enough free descriptors to transmit this
2128 * packet. We haven't committed anything yet,
2129 * so just unload the DMA map, put the packet
2130 			 * back on the queue, and punt.  Notify the upper
2131 * layer that there are no more slots left.
2132 */
2133 DPRINTF(WM_DEBUG_TX,
2134 ("%s: TX: need %d (%d) descriptors, have %d\n",
2135 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
2136 sc->sc_txfree - 1));
2137 ifp->if_flags |= IFF_OACTIVE;
2138 bus_dmamap_unload(sc->sc_dmat, dmamap);
2139 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2140 break;
2141 }
2142
2143 /*
2144 * Check for 82547 Tx FIFO bug. We need to do this
2145 * once we know we can transmit the packet, since we
2146 * do some internal FIFO space accounting here.
2147 */
2148 if (sc->sc_type == WM_T_82547 &&
2149 wm_82547_txfifo_bugchk(sc, m0)) {
2150 DPRINTF(WM_DEBUG_TX,
2151 ("%s: TX: 82547 Tx FIFO bug detected\n",
2152 sc->sc_dev.dv_xname));
2153 ifp->if_flags |= IFF_OACTIVE;
2154 bus_dmamap_unload(sc->sc_dmat, dmamap);
2155 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2156 break;
2157 }
2158
2159 IFQ_DEQUEUE(&ifp->if_snd, m0);
2160
2161 /*
2162 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2163 */
2164
2165 DPRINTF(WM_DEBUG_TX,
2166 ("%s: TX: packet has %d (%d) DMA segments\n",
2167 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
2168
2169 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2170
2171 /*
2172 * Store a pointer to the packet so that we can free it
2173 * later.
2174 *
2175 * Initially, we consider the number of descriptors the
2176 		 * packet uses to be the number of DMA segments.  This may be
2177 * incremented by 1 if we do checksum offload (a descriptor
2178 * is used to set the checksum context).
2179 */
2180 txs->txs_mbuf = m0;
2181 txs->txs_firstdesc = sc->sc_txnext;
2182 txs->txs_ndesc = segs_needed;
2183
2184 /* Set up offload parameters for this packet. */
2185 if (m0->m_pkthdr.csum_flags &
2186 (M_CSUM_TSOv4|M_CSUM_TSOv6|
2187 M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2188 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2189 if (wm_tx_offload(sc, txs, &cksumcmd,
2190 &cksumfields) != 0) {
2191 /* Error message already displayed. */
2192 bus_dmamap_unload(sc->sc_dmat, dmamap);
2193 continue;
2194 }
2195 } else {
2196 cksumcmd = 0;
2197 cksumfields = 0;
2198 }
2199
2200 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2201
2202 /* Sync the DMA map. */
2203 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2204 BUS_DMASYNC_PREWRITE);
2205
2206 /*
2207 * Initialize the transmit descriptor.
2208 */
2209 for (nexttx = sc->sc_txnext, seg = 0;
2210 seg < dmamap->dm_nsegs; seg++) {
2211 for (seglen = dmamap->dm_segs[seg].ds_len,
2212 curaddr = dmamap->dm_segs[seg].ds_addr;
2213 seglen != 0;
2214 curaddr += curlen, seglen -= curlen,
2215 nexttx = WM_NEXTTX(sc, nexttx)) {
2216 curlen = seglen;
2217
2218 /*
2219 * So says the Linux driver:
2220 * Work around for premature descriptor
2221 * write-backs in TSO mode. Append a
2222 * 4-byte sentinel descriptor.
2223 */
2224 if (use_tso &&
2225 seg == dmamap->dm_nsegs - 1 &&
2226 curlen > 8)
2227 curlen -= 4;
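				/*
				 * The 4 bytes trimmed here are not lost:
				 * seglen stays non-zero, so the next loop
				 * iteration emits them as the 4-byte sentinel
				 * descriptor reserved in segs_needed.
				 */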
2228
2229 wm_set_dma_addr(
2230 &sc->sc_txdescs[nexttx].wtx_addr,
2231 curaddr);
2232 sc->sc_txdescs[nexttx].wtx_cmdlen =
2233 htole32(cksumcmd | curlen);
2234 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2235 0;
2236 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2237 cksumfields;
2238 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2239 lasttx = nexttx;
2240
2241 DPRINTF(WM_DEBUG_TX,
2242 ("%s: TX: desc %d: low 0x%08lx, "
2243 "len 0x%04x\n",
2244 sc->sc_dev.dv_xname, nexttx,
2245 curaddr & 0xffffffffUL, (unsigned)curlen));
2246 }
2247 }
2248
2249 KASSERT(lasttx != -1);
2250
2251 /*
2252 * Set up the command byte on the last descriptor of
2253 * the packet. If we're in the interrupt delay window,
2254 * delay the interrupt.
2255 */
2256 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2257 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2258
2259 #if 0 /* XXXJRT */
2260 /*
2261 * If VLANs are enabled and the packet has a VLAN tag, set
2262 * up the descriptor to encapsulate the packet for us.
2263 *
2264 * This is only valid on the last descriptor of the packet.
2265 */
2266 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2267 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2268 htole32(WTX_CMD_VLE);
2269 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2270 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2271 }
2272 #endif /* XXXJRT */
2273
2274 txs->txs_lastdesc = lasttx;
2275
2276 DPRINTF(WM_DEBUG_TX,
2277 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
2278 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2279
2280 /* Sync the descriptors we're using. */
2281 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2282 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2283
2284 /* Give the packet to the chip. */
2285 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2286
2287 DPRINTF(WM_DEBUG_TX,
2288 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
2289
2290 DPRINTF(WM_DEBUG_TX,
2291 ("%s: TX: finished transmitting packet, job %d\n",
2292 sc->sc_dev.dv_xname, sc->sc_txsnext));
2293
2294 /* Advance the tx pointer. */
2295 sc->sc_txfree -= txs->txs_ndesc;
2296 sc->sc_txnext = nexttx;
2297
2298 sc->sc_txsfree--;
2299 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2300
2301 #if NBPFILTER > 0
2302 /* Pass the packet to any BPF listeners. */
2303 if (ifp->if_bpf)
2304 bpf_mtap(ifp->if_bpf, m0);
2305 #endif /* NBPFILTER > 0 */
2306 }
2307
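	/*
	 * The margin of two matches the reservation made when loading
	 * the packet: one descriptor for the TDT register semantics and
	 * one for a possible offload context.
	 */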
2308 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2309 /* No more slots; notify upper layer. */
2310 ifp->if_flags |= IFF_OACTIVE;
2311 }
2312
2313 if (sc->sc_txfree != ofree) {
2314 /* Set a watchdog timer in case the chip flakes out. */
2315 ifp->if_timer = 5;
2316 }
2317 }
2318
2319 /*
2320 * wm_watchdog: [ifnet interface function]
2321 *
2322 * Watchdog timer handler.
2323 */
2324 static void
2325 wm_watchdog(struct ifnet *ifp)
2326 {
2327 struct wm_softc *sc = ifp->if_softc;
2328
2329 /*
2330 * Since we're using delayed interrupts, sweep up
2331 * before we report an error.
2332 */
2333 wm_txintr(sc);
2334
2335 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2336 log(LOG_ERR,
2337 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2338 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
2339 sc->sc_txnext);
2340 ifp->if_oerrors++;
2341
2342 /* Reset the interface. */
2343 (void) wm_init(ifp);
2344 }
2345
2346 /* Try to get more packets going. */
2347 wm_start(ifp);
2348 }
2349
2350 /*
2351 * wm_ioctl: [ifnet interface function]
2352 *
2353 * Handle control requests from the operator.
2354 */
2355 static int
2356 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2357 {
2358 struct wm_softc *sc = ifp->if_softc;
2359 struct ifreq *ifr = (struct ifreq *) data;
2360 int s, error;
2361
2362 s = splnet();
2363
2364 switch (cmd) {
2365 case SIOCSIFMEDIA:
2366 case SIOCGIFMEDIA:
2367 /* Flow control requires full-duplex mode. */
2368 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2369 (ifr->ifr_media & IFM_FDX) == 0)
2370 ifr->ifr_media &= ~IFM_ETH_FMASK;
2371 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2372 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2373 /* We can do both TXPAUSE and RXPAUSE. */
2374 ifr->ifr_media |=
2375 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2376 }
2377 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2378 }
2379 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2380 break;
2381 default:
2382 error = ether_ioctl(ifp, cmd, data);
2383 if (error == ENETRESET) {
2384 /*
2385 * Multicast list has changed; set the hardware filter
2386 * accordingly.
2387 */
2388 if (ifp->if_flags & IFF_RUNNING)
2389 wm_set_filter(sc);
2390 error = 0;
2391 }
2392 break;
2393 }
2394
2395 /* Try to get more packets going. */
2396 wm_start(ifp);
2397
2398 splx(s);
2399 return (error);
2400 }
2401
2402 /*
2403 * wm_intr:
2404 *
2405 * Interrupt service routine.
2406 */
2407 static int
2408 wm_intr(void *arg)
2409 {
2410 struct wm_softc *sc = arg;
2411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2412 uint32_t icr;
2413 int handled = 0;
2414
2415 while (1 /* CONSTCOND */) {
2416 icr = CSR_READ(sc, WMREG_ICR);
2417 if ((icr & sc->sc_icr) == 0)
2418 break;
2419 #if 0 /*NRND > 0*/
2420 if (RND_ENABLED(&sc->rnd_source))
2421 rnd_add_uint32(&sc->rnd_source, icr);
2422 #endif
2423
2424 handled = 1;
2425
2426 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2427 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2428 DPRINTF(WM_DEBUG_RX,
2429 ("%s: RX: got Rx intr 0x%08x\n",
2430 sc->sc_dev.dv_xname,
2431 icr & (ICR_RXDMT0|ICR_RXT0)));
2432 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2433 }
2434 #endif
2435 wm_rxintr(sc);
2436
2437 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2438 if (icr & ICR_TXDW) {
2439 DPRINTF(WM_DEBUG_TX,
2440 ("%s: TX: got TXDW interrupt\n",
2441 sc->sc_dev.dv_xname));
2442 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2443 }
2444 #endif
2445 wm_txintr(sc);
2446
2447 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2448 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2449 wm_linkintr(sc, icr);
2450 }
2451
2452 if (icr & ICR_RXO) {
2453 ifp->if_ierrors++;
2454 #if defined(WM_DEBUG)
2455 log(LOG_WARNING, "%s: Receive overrun\n",
2456 sc->sc_dev.dv_xname);
2457 #endif /* defined(WM_DEBUG) */
2458 }
2459 }
2460
2461 if (handled) {
2462 /* Try to get more packets going. */
2463 wm_start(ifp);
2464 }
2465
2466 return (handled);
2467 }
2468
2469 /*
2470 * wm_txintr:
2471 *
2472 * Helper; handle transmit interrupts.
2473 */
2474 static void
2475 wm_txintr(struct wm_softc *sc)
2476 {
2477 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2478 struct wm_txsoft *txs;
2479 uint8_t status;
2480 int i;
2481
2482 ifp->if_flags &= ~IFF_OACTIVE;
2483
2484 /*
2485 * Go through the Tx list and free mbufs for those
2486 * frames which have been transmitted.
2487 */
2488 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2489 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2490 txs = &sc->sc_txsoft[i];
2491
2492 DPRINTF(WM_DEBUG_TX,
2493 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
2494
2495 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2496 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2497
2498 status =
2499 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2500 if ((status & WTX_ST_DD) == 0) {
2501 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2502 BUS_DMASYNC_PREREAD);
2503 break;
2504 }
2505
2506 DPRINTF(WM_DEBUG_TX,
2507 ("%s: TX: job %d done: descs %d..%d\n",
2508 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
2509 txs->txs_lastdesc));
2510
2511 /*
2512 * XXX We should probably be using the statistics
2513 * XXX registers, but I don't know if they exist
2514 * XXX on chips before the i82544.
2515 */
2516
2517 #ifdef WM_EVENT_COUNTERS
2518 if (status & WTX_ST_TU)
2519 WM_EVCNT_INCR(&sc->sc_ev_tu);
2520 #endif /* WM_EVENT_COUNTERS */
2521
2522 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2523 ifp->if_oerrors++;
2524 if (status & WTX_ST_LC)
2525 log(LOG_WARNING, "%s: late collision\n",
2526 sc->sc_dev.dv_xname);
2527 else if (status & WTX_ST_EC) {
2528 ifp->if_collisions += 16;
2529 log(LOG_WARNING, "%s: excessive collisions\n",
2530 sc->sc_dev.dv_xname);
2531 }
2532 } else
2533 ifp->if_opackets++;
2534
2535 sc->sc_txfree += txs->txs_ndesc;
2536 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2537 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2538 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2539 m_freem(txs->txs_mbuf);
2540 txs->txs_mbuf = NULL;
2541 }
2542
2543 /* Update the dirty transmit buffer pointer. */
2544 sc->sc_txsdirty = i;
2545 DPRINTF(WM_DEBUG_TX,
2546 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2547
2548 /*
2549 * If there are no more pending transmissions, cancel the watchdog
2550 * timer.
2551 */
2552 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2553 ifp->if_timer = 0;
2554 }
2555
2556 /*
2557 * wm_rxintr:
2558 *
2559 * Helper; handle receive interrupts.
2560 */
2561 static void
2562 wm_rxintr(struct wm_softc *sc)
2563 {
2564 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2565 struct wm_rxsoft *rxs;
2566 struct mbuf *m;
2567 int i, len;
2568 uint8_t status, errors;
2569
2570 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2571 rxs = &sc->sc_rxsoft[i];
2572
2573 DPRINTF(WM_DEBUG_RX,
2574 ("%s: RX: checking descriptor %d\n",
2575 sc->sc_dev.dv_xname, i));
2576
2577 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2578
2579 status = sc->sc_rxdescs[i].wrx_status;
2580 errors = sc->sc_rxdescs[i].wrx_errors;
2581 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2582
2583 if ((status & WRX_ST_DD) == 0) {
2584 /*
2585 * We have processed all of the receive descriptors.
2586 */
2587 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2588 break;
2589 }
2590
2591 if (__predict_false(sc->sc_rxdiscard)) {
2592 DPRINTF(WM_DEBUG_RX,
2593 ("%s: RX: discarding contents of descriptor %d\n",
2594 sc->sc_dev.dv_xname, i));
2595 WM_INIT_RXDESC(sc, i);
2596 if (status & WRX_ST_EOP) {
2597 /* Reset our state. */
2598 DPRINTF(WM_DEBUG_RX,
2599 ("%s: RX: resetting rxdiscard -> 0\n",
2600 sc->sc_dev.dv_xname));
2601 sc->sc_rxdiscard = 0;
2602 }
2603 continue;
2604 }
2605
2606 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2607 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2608
2609 m = rxs->rxs_mbuf;
2610
2611 /*
2612 * Add a new receive buffer to the ring, unless of
2613 * course the length is zero. Treat the latter as a
2614 * failed mapping.
2615 */
2616 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2617 /*
2618 * Failed, throw away what we've done so
2619 * far, and discard the rest of the packet.
2620 */
2621 ifp->if_ierrors++;
2622 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2623 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2624 WM_INIT_RXDESC(sc, i);
2625 if ((status & WRX_ST_EOP) == 0)
2626 sc->sc_rxdiscard = 1;
2627 if (sc->sc_rxhead != NULL)
2628 m_freem(sc->sc_rxhead);
2629 WM_RXCHAIN_RESET(sc);
2630 DPRINTF(WM_DEBUG_RX,
2631 ("%s: RX: Rx buffer allocation failed, "
2632 "dropping packet%s\n", sc->sc_dev.dv_xname,
2633 sc->sc_rxdiscard ? " (discard)" : ""));
2634 continue;
2635 }
2636
2637 WM_RXCHAIN_LINK(sc, m);
2638
2639 m->m_len = len;
2640
2641 DPRINTF(WM_DEBUG_RX,
2642 ("%s: RX: buffer at %p len %d\n",
2643 sc->sc_dev.dv_xname, m->m_data, len));
2644
2645 /*
2646 * If this is not the end of the packet, keep
2647 * looking.
2648 */
2649 if ((status & WRX_ST_EOP) == 0) {
2650 sc->sc_rxlen += len;
2651 DPRINTF(WM_DEBUG_RX,
2652 ("%s: RX: not yet EOP, rxlen -> %d\n",
2653 sc->sc_dev.dv_xname, sc->sc_rxlen));
2654 continue;
2655 }
2656
2657 /*
2658 * Okay, we have the entire packet now. The chip is
2659 * configured to include the FCS (not all chips can
2660 * be configured to strip it), so we need to trim it.
2661 */
2662 m->m_len -= ETHER_CRC_LEN;
2663
2664 *sc->sc_rxtailp = NULL;
2665 len = m->m_len + sc->sc_rxlen;
2666 m = sc->sc_rxhead;
2667
2668 WM_RXCHAIN_RESET(sc);
2669
2670 DPRINTF(WM_DEBUG_RX,
2671 ("%s: RX: have entire packet, len -> %d\n",
2672 sc->sc_dev.dv_xname, len));
2673
2674 /*
2675 * If an error occurred, update stats and drop the packet.
2676 */
2677 if (errors &
2678 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2679 ifp->if_ierrors++;
2680 if (errors & WRX_ER_SE)
2681 log(LOG_WARNING, "%s: symbol error\n",
2682 sc->sc_dev.dv_xname);
2683 else if (errors & WRX_ER_SEQ)
2684 log(LOG_WARNING, "%s: receive sequence error\n",
2685 sc->sc_dev.dv_xname);
2686 else if (errors & WRX_ER_CE)
2687 log(LOG_WARNING, "%s: CRC error\n",
2688 sc->sc_dev.dv_xname);
2689 m_freem(m);
2690 continue;
2691 }
2692
2693 /*
2694 * No errors. Receive the packet.
2695 */
2696 m->m_pkthdr.rcvif = ifp;
2697 m->m_pkthdr.len = len;
2698
2699 #if 0 /* XXXJRT */
2700 /*
2701 * If VLANs are enabled, VLAN packets have been unwrapped
2702 * for us. Associate the tag with the packet.
2703 */
2704 if ((status & WRX_ST_VP) != 0) {
2705 VLAN_INPUT_TAG(ifp, m,
2706 			    le16toh(sc->sc_rxdescs[i].wrx_special),
2707 continue);
2708 }
2709 #endif /* XXXJRT */
2710
2711 /*
2712 * Set up checksum info for this packet.
2713 */
2714 if ((status & WRX_ST_IXSM) == 0) {
2715 if (status & WRX_ST_IPCS) {
2716 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2717 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2718 if (errors & WRX_ER_IPE)
2719 m->m_pkthdr.csum_flags |=
2720 M_CSUM_IPv4_BAD;
2721 }
2722 if (status & WRX_ST_TCPCS) {
2723 /*
2724 * Note: we don't know if this was TCP or UDP,
2725 * so we just set both bits, and expect the
2726 * upper layers to deal.
2727 */
2728 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2729 m->m_pkthdr.csum_flags |=
2730 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2731 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2732 if (errors & WRX_ER_TCPE)
2733 m->m_pkthdr.csum_flags |=
2734 M_CSUM_TCP_UDP_BAD;
2735 }
2736 }
2737
2738 ifp->if_ipackets++;
2739
2740 #if NBPFILTER > 0
2741 /* Pass this up to any BPF listeners. */
2742 if (ifp->if_bpf)
2743 bpf_mtap(ifp->if_bpf, m);
2744 #endif /* NBPFILTER > 0 */
2745
2746 /* Pass it on. */
2747 (*ifp->if_input)(ifp, m);
2748 }
2749
2750 /* Update the receive pointer. */
2751 sc->sc_rxptr = i;
2752
2753 DPRINTF(WM_DEBUG_RX,
2754 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2755 }
2756
2757 /*
2758 * wm_linkintr:
2759 *
2760 * Helper; handle link interrupts.
2761 */
2762 static void
2763 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2764 {
2765 uint32_t status;
2766
2767 /*
2768 * If we get a link status interrupt on a 1000BASE-T
2769 * device, just fall into the normal MII tick path.
2770 */
2771 if (sc->sc_flags & WM_F_HAS_MII) {
2772 if (icr & ICR_LSC) {
2773 DPRINTF(WM_DEBUG_LINK,
2774 ("%s: LINK: LSC -> mii_tick\n",
2775 sc->sc_dev.dv_xname));
2776 mii_tick(&sc->sc_mii);
2777 } else if (icr & ICR_RXSEQ) {
2778 DPRINTF(WM_DEBUG_LINK,
2779 			    ("%s: LINK: Receive sequence error\n",
2780 sc->sc_dev.dv_xname));
2781 }
2782 return;
2783 }
2784
2785 /*
2786 * If we are now receiving /C/, check for link again in
2787 * a couple of link clock ticks.
2788 */
2789 if (icr & ICR_RXCFG) {
2790 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2791 sc->sc_dev.dv_xname));
2792 sc->sc_tbi_anstate = 2;
2793 }
2794
2795 if (icr & ICR_LSC) {
2796 status = CSR_READ(sc, WMREG_STATUS);
2797 if (status & STATUS_LU) {
2798 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2799 sc->sc_dev.dv_xname,
2800 (status & STATUS_FD) ? "FDX" : "HDX"));
2801 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2802 sc->sc_fcrtl &= ~FCRTL_XONE;
2803 if (status & STATUS_FD)
2804 sc->sc_tctl |=
2805 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2806 else
2807 sc->sc_tctl |=
2808 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2809 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2810 sc->sc_fcrtl |= FCRTL_XONE;
2811 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2812 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2813 WMREG_OLD_FCRTL : WMREG_FCRTL,
2814 sc->sc_fcrtl);
2815 sc->sc_tbi_linkup = 1;
2816 } else {
2817 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2818 sc->sc_dev.dv_xname));
2819 sc->sc_tbi_linkup = 0;
2820 }
2821 sc->sc_tbi_anstate = 2;
2822 wm_tbi_set_linkled(sc);
2823 } else if (icr & ICR_RXSEQ) {
2824 DPRINTF(WM_DEBUG_LINK,
2825 ("%s: LINK: Receive sequence error\n",
2826 sc->sc_dev.dv_xname));
2827 }
2828 }
2829
2830 /*
2831 * wm_tick:
2832 *
2833 * One second timer, used to check link status, sweep up
2834 * completed transmit jobs, etc.
2835 */
2836 static void
2837 wm_tick(void *arg)
2838 {
2839 struct wm_softc *sc = arg;
2840 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2841 int s;
2842
2843 s = splnet();
2844
2845 if (sc->sc_type >= WM_T_82542_2_1) {
2846 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2847 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2848 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2849 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2850 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2851 }
2852
2853 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2854 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2855 
2857 if (sc->sc_flags & WM_F_HAS_MII)
2858 mii_tick(&sc->sc_mii);
2859 else
2860 wm_tbi_check_link(sc);
2861
2862 splx(s);
2863
2864 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2865 }
2866
2867 /*
2868 * wm_reset:
2869 *
2870 * Reset the i82542 chip.
2871 */
2872 static void
2873 wm_reset(struct wm_softc *sc)
2874 {
2875 uint32_t reg;
2876
2877 /*
2878 * Allocate on-chip memory according to the MTU size.
2879 * The Packet Buffer Allocation register must be written
2880 * before the chip is reset.
2881 */
2882 switch (sc->sc_type) {
2883 case WM_T_82547:
2884 case WM_T_82547_2:
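		/*
		 * The 82547 has a 40 KB packet buffer; the receive share is
		 * chosen by MTU below and the remainder becomes the Tx FIFO
		 * tracked by the FIFO-stall workaround.
		 */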
2885 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2886 PBA_22K : PBA_30K;
2887 sc->sc_txfifo_head = 0;
2888 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2889 sc->sc_txfifo_size =
2890 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2891 sc->sc_txfifo_stall = 0;
2892 break;
2893 case WM_T_82571:
2894 case WM_T_82572:
2895 case WM_T_80003:
2896 sc->sc_pba = PBA_32K;
2897 break;
2898 case WM_T_82573:
2899 sc->sc_pba = PBA_12K;
2900 break;
2901 case WM_T_ICH8:
2902 sc->sc_pba = PBA_8K;
2903 CSR_WRITE(sc, WMREG_PBS, PBA_16K);
2904 break;
2905 case WM_T_ICH9:
2906 sc->sc_pba = PBA_10K;
2907 break;
2908 default:
2909 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2910 PBA_40K : PBA_48K;
2911 break;
2912 }
2913 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2914
2915 if (sc->sc_flags & WM_F_PCIE) {
2916 int timeout = 800;
2917
2918 sc->sc_ctrl |= CTRL_GIO_M_DIS;
2919 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2920
2921 while (timeout) {
2922 if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA) == 0)
2923 break;
2924 delay(100);
2925 }
2926 }
2927
2928 /* clear interrupt */
2929 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2930
2931 /*
2932 * 82541 Errata 29? & 82547 Errata 28?
2933 	 * See also the description of the PHY_RST bit in the CTRL register
2934 * in 8254x_GBe_SDM.pdf.
2935 */
2936 if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
2937 CSR_WRITE(sc, WMREG_CTRL,
2938 CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
2939 delay(5000);
2940 }
2941
2942 switch (sc->sc_type) {
2943 case WM_T_82544:
2944 case WM_T_82540:
2945 case WM_T_82545:
2946 case WM_T_82546:
2947 case WM_T_82541:
2948 case WM_T_82541_2:
2949 /*
2950 * On some chipsets, a reset through a memory-mapped write
2951 * cycle can cause the chip to reset before completing the
2952 * write cycle. This causes major headache that can be
2953 * avoided by issuing the reset via indirect register writes
2954 * through I/O space.
2955 *
2956 * So, if we successfully mapped the I/O BAR at attach time,
2957 * use that. Otherwise, try our luck with a memory-mapped
2958 * reset.
2959 */
2960 if (sc->sc_flags & WM_F_IOH_VALID)
2961 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2962 else
2963 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2964 break;
2965
2966 case WM_T_82545_3:
2967 case WM_T_82546_3:
2968 /* Use the shadow control register on these chips. */
2969 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2970 break;
2971
2972 case WM_T_ICH8:
2973 case WM_T_ICH9:
2974 wm_get_swfwhw_semaphore(sc);
2975 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST | CTRL_PHY_RESET);
2976 		delay(10000);
2977 		break;
2978 default:
2979 /* Everything else can safely use the documented method. */
2980 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2981 break;
2982 }
2983 delay(10000);
2984
2985 /* reload EEPROM */
2986 	switch (sc->sc_type) {
2987 case WM_T_82542_2_0:
2988 case WM_T_82542_2_1:
2989 case WM_T_82543:
2990 case WM_T_82544:
2991 delay(10);
2992 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
2993 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2994 delay(2000);
2995 break;
2996 case WM_T_82541:
2997 case WM_T_82541_2:
2998 case WM_T_82547:
2999 case WM_T_82547_2:
3000 delay(20000);
3001 break;
3002 case WM_T_82573:
3003 if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3004 delay(10);
3005 reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3006 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3007 }
3008 /* FALLTHROUGH */
3009 default:
3010 /* check EECD_EE_AUTORD */
3011 wm_get_auto_rd_done(sc);
3012 }
3013
3014 #if 0
3015 for (i = 0; i < 1000; i++) {
3016 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0) {
3017 return;
3018 }
3019 delay(20);
3020 }
3021
3022 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
3023 log(LOG_ERR, "%s: reset failed to complete\n",
3024 sc->sc_dev.dv_xname);
3025 #endif
3026 }
3027
3028 /*
3029 * wm_init: [ifnet interface function]
3030 *
3031 * Initialize the interface. Must be called at splnet().
3032 */
3033 static int
3034 wm_init(struct ifnet *ifp)
3035 {
3036 struct wm_softc *sc = ifp->if_softc;
3037 struct wm_rxsoft *rxs;
3038 int i, error = 0;
3039 uint32_t reg;
3040
3041 /*
3042 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
3043 	 * There is a small but measurable benefit to avoiding the adjustment
3044 * of the descriptor so that the headers are aligned, for normal mtu,
3045 * on such platforms. One possibility is that the DMA itself is
3046 * slightly more efficient if the front of the entire packet (instead
3047 * of the front of the headers) is aligned.
3048 *
3049 * Note we must always set align_tweak to 0 if we are using
3050 * jumbo frames.
3051 */
3052 #ifdef __NO_STRICT_ALIGNMENT
3053 sc->sc_align_tweak = 0;
3054 #else
3055 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
3056 sc->sc_align_tweak = 0;
3057 else
3058 sc->sc_align_tweak = 2;
3059 #endif /* __NO_STRICT_ALIGNMENT */
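	/*
	 * A tweak of two shifts the 14-byte Ethernet header so that it
	 * ends on a 4-byte boundary, leaving the IP header that follows
	 * naturally aligned for strict-alignment CPUs.
	 */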
3060
3061 /* Cancel any pending I/O. */
3062 wm_stop(ifp, 0);
3063
3064 /* update statistics before reset */
3065 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3066 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
3067
3068 /* Reset the chip to a known state. */
3069 wm_reset(sc);
3070
3071 /* Initialize the transmit descriptor ring. */
3072 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
3073 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
3074 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3075 sc->sc_txfree = WM_NTXDESC(sc);
3076 sc->sc_txnext = 0;
3077
3078 if (sc->sc_type < WM_T_82543) {
3079 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
3080 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
3081 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
3082 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
3083 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
3084 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
3085 } else {
3086 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
3087 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
3088 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
3089 CSR_WRITE(sc, WMREG_TDH, 0);
3090 CSR_WRITE(sc, WMREG_TDT, 0);
3091 CSR_WRITE(sc, WMREG_TIDV, 64);
3092 CSR_WRITE(sc, WMREG_TADV, 128);
3093
3094 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
3095 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
3096 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
3097 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
3098 }
3099 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
3100 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
3101
3102 /* Initialize the transmit job descriptors. */
3103 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
3104 sc->sc_txsoft[i].txs_mbuf = NULL;
3105 sc->sc_txsfree = WM_TXQUEUELEN(sc);
3106 sc->sc_txsnext = 0;
3107 sc->sc_txsdirty = 0;
3108
3109 /*
3110 * Initialize the receive descriptor and receive job
3111 * descriptor rings.
3112 */
3113 if (sc->sc_type < WM_T_82543) {
3114 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
3115 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
3116 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
3117 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
3118 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
3119 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
3120
3121 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
3122 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
3123 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
3124 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
3125 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
3126 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
3127 } else {
3128 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
3129 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
3130 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
3131 CSR_WRITE(sc, WMREG_RDH, 0);
3132 CSR_WRITE(sc, WMREG_RDT, 0);
3133 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD);
3134 CSR_WRITE(sc, WMREG_RADV, 128);
3135 }
3136 for (i = 0; i < WM_NRXDESC; i++) {
3137 rxs = &sc->sc_rxsoft[i];
3138 if (rxs->rxs_mbuf == NULL) {
3139 if ((error = wm_add_rxbuf(sc, i)) != 0) {
3140 log(LOG_ERR, "%s: unable to allocate or map rx "
3141 "buffer %d, error = %d\n",
3142 sc->sc_dev.dv_xname, i, error);
3143 /*
3144 * XXX Should attempt to run with fewer receive
3145 * XXX buffers instead of just failing.
3146 */
3147 wm_rxdrain(sc);
3148 goto out;
3149 }
3150 } else
3151 WM_INIT_RXDESC(sc, i);
3152 }
3153 sc->sc_rxptr = 0;
3154 sc->sc_rxdiscard = 0;
3155 WM_RXCHAIN_RESET(sc);
3156
3157 /*
3158 * Clear out the VLAN table -- we don't use it (yet).
3159 */
3160 CSR_WRITE(sc, WMREG_VET, 0);
3161 for (i = 0; i < WM_VLAN_TABSIZE; i++)
3162 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
3163
3164 /*
3165 * Set up flow-control parameters.
3166 *
3167 * XXX Values could probably stand some tuning.
3168 */
3169 if (sc->sc_type != WM_T_ICH8) {
3170 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
3171 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
3172 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
3173 }
3174
3175 sc->sc_fcrtl = FCRTL_DFLT;
3176 if (sc->sc_type < WM_T_82543) {
3177 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
3178 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
3179 } else {
3180 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
3181 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
3182 }
3183 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
3184
3185 #if 0 /* XXXJRT */
3186 /* Deal with VLAN enables. */
3187 if (VLAN_ATTACHED(&sc->sc_ethercom))
3188 sc->sc_ctrl |= CTRL_VME;
3189 else
3190 #endif /* XXXJRT */
3191 sc->sc_ctrl &= ~CTRL_VME;
3192
3193 /* Write the control registers. */
3194 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3195 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
3196 int val;
3197 val = CSR_READ(sc, WMREG_CTRL_EXT);
3198 val &= ~CTRL_EXT_LINK_MODE_MASK;
3199 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
3200
3201 		/* Bypass the RX and TX FIFOs */
3202 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
3203 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
3204 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
3205
3206 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
3207 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
3208 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
3209 /*
3210 		 * Set the MAC to wait the maximum time between each
3211 		 * iteration and to increase the maximum number of iterations
3212 		 * when polling the PHY; this fixes erroneous timeouts at 10Mbps.
3213 */
3214 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
3215 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
3216 val |= 0x3F;
3217 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
3218 }
3219 #if 0
3220 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
3221 #endif
3222
3223 /*
3224 * Set up checksum offload parameters.
3225 */
3226 reg = CSR_READ(sc, WMREG_RXCSUM);
3227 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3228 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3229 reg |= RXCSUM_IPOFL;
3230 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3231 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3232 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3233 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3234 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3235
3236 /*
3237 * Set up the interrupt registers.
3238 */
3239 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3240 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3241 ICR_RXO | ICR_RXT0;
3242 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3243 sc->sc_icr |= ICR_RXCFG;
3244 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3245
3246 /* Set up the inter-packet gap. */
3247 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3248
3249 if (sc->sc_type >= WM_T_82543) {
3250 /* Set up the interrupt throttling register (units of 256ns) */
3251 sc->sc_itr = 1000000000 / (7000 * 256);
3252 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
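		/*
		 * 1000000000 / (7000 * 256) = 558 intervals of 256 ns each,
		 * i.e. at least ~143 us between interrupts, capping the rate
		 * at roughly 7000 interrupts per second.
		 */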
3253 }
3254
3255 #if 0 /* XXXJRT */
3256 /* Set the VLAN ethernetype. */
3257 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3258 #endif
3259
3260 /*
3261 * Set up the transmit control register; we start out with
3262 	 * a collision distance suitable for FDX, but update it when
3263 * we resolve the media type.
3264 */
3265 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3266 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3267 if (sc->sc_type >= WM_T_82571)
3268 sc->sc_tctl |= TCTL_MULR;
3269 if (sc->sc_type >= WM_T_80003)
3270 sc->sc_tctl |= TCTL_RTLC;
3271 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3272
3273 /* Set the media. */
3274 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
3275
3276 /*
3277 * Set up the receive control register; we actually program
3278 * the register when we set the receive filter. Use multicast
3279 * address offset type 0.
3280 *
3281 * Only the i82544 has the ability to strip the incoming
3282 * CRC, so we don't enable that feature.
3283 */
3284 sc->sc_mchash_type = 0;
3285 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3286 | RCTL_MO(sc->sc_mchash_type);
3287
3288 	/* The 82573 and ICH8 don't support jumbo frames */
3289 if (sc->sc_type != WM_T_82573 && sc->sc_type != WM_T_ICH8)
3290 sc->sc_rctl |= RCTL_LPE;
3291
3292 if (MCLBYTES == 2048) {
3293 sc->sc_rctl |= RCTL_2k;
3294 } else {
3295 if (sc->sc_type >= WM_T_82543) {
3296 			switch (MCLBYTES) {
3297 case 4096:
3298 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3299 break;
3300 case 8192:
3301 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3302 break;
3303 case 16384:
3304 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3305 break;
3306 default:
3307 panic("wm_init: MCLBYTES %d unsupported",
3308 MCLBYTES);
3309 break;
3310 }
3311 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3312 }
3313
3314 /* Set the receive filter. */
3315 wm_set_filter(sc);
3316
3317 /* Start the one second link check clock. */
3318 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3319
3320 /* ...all done! */
3321 ifp->if_flags |= IFF_RUNNING;
3322 ifp->if_flags &= ~IFF_OACTIVE;
3323
3324 out:
3325 if (error)
3326 log(LOG_ERR, "%s: interface not running\n",
3327 sc->sc_dev.dv_xname);
3328 return (error);
3329 }
3330
3331 /*
3332 * wm_rxdrain:
3333 *
3334 * Drain the receive queue.
3335 */
3336 static void
3337 wm_rxdrain(struct wm_softc *sc)
3338 {
3339 struct wm_rxsoft *rxs;
3340 int i;
3341
3342 for (i = 0; i < WM_NRXDESC; i++) {
3343 rxs = &sc->sc_rxsoft[i];
3344 if (rxs->rxs_mbuf != NULL) {
3345 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3346 m_freem(rxs->rxs_mbuf);
3347 rxs->rxs_mbuf = NULL;
3348 }
3349 }
3350 }
3351
3352 /*
3353 * wm_stop: [ifnet interface function]
3354 *
3355 * Stop transmission on the interface.
3356 */
3357 static void
3358 wm_stop(struct ifnet *ifp, int disable)
3359 {
3360 struct wm_softc *sc = ifp->if_softc;
3361 struct wm_txsoft *txs;
3362 int i;
3363
3364 /* Stop the one second clock. */
3365 callout_stop(&sc->sc_tick_ch);
3366
3367 /* Stop the 82547 Tx FIFO stall check timer. */
3368 if (sc->sc_type == WM_T_82547)
3369 callout_stop(&sc->sc_txfifo_ch);
3370
3371 if (sc->sc_flags & WM_F_HAS_MII) {
3372 /* Down the MII. */
3373 mii_down(&sc->sc_mii);
3374 }
3375
3376 /* Stop the transmit and receive processes. */
3377 CSR_WRITE(sc, WMREG_TCTL, 0);
3378 CSR_WRITE(sc, WMREG_RCTL, 0);
3379
3380 /*
3381 * Clear the interrupt mask to ensure the device cannot assert its
3382 * interrupt line.
3383 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3384 * any currently pending or shared interrupt.
3385 */
3386 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3387 sc->sc_icr = 0;
3388
3389 /* Release any queued transmit buffers. */
3390 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3391 txs = &sc->sc_txsoft[i];
3392 if (txs->txs_mbuf != NULL) {
3393 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3394 m_freem(txs->txs_mbuf);
3395 txs->txs_mbuf = NULL;
3396 }
3397 }
3398
3399 if (disable)
3400 wm_rxdrain(sc);
3401
3402 /* Mark the interface as down and cancel the watchdog timer. */
3403 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3404 ifp->if_timer = 0;
3405 }
3406
3407 void
3408 wm_get_auto_rd_done(struct wm_softc *sc)
3409 {
3410 int i;
3411
3412 /* wait for eeprom to reload */
3413 switch (sc->sc_type) {
3414 case WM_T_82571:
3415 case WM_T_82572:
3416 case WM_T_82573:
3417 case WM_T_80003:
3418 case WM_T_ICH8:
3419 case WM_T_ICH9:
3420 for (i = 10; i > 0; i--) {
3421 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3422 break;
3423 delay(1000);
3424 }
3425 if (i == 0) {
3426 log(LOG_ERR, "%s: auto read from eeprom failed to "
3427 "complete\n", sc->sc_dev.dv_xname);
3428 }
3429 break;
3430 default:
3431 delay(5000);
3432 break;
3433 }
3434
3435 	/* PHY configuration starts after EECD_EE_AUTORD is set */
3436 if (sc->sc_type == WM_T_82573)
3437 delay(25000);
3438 }
3439
3440 /*
3441 * wm_acquire_eeprom:
3442 *
3443 * Perform the EEPROM handshake required on some chips.
3444 */
3445 static int
3446 wm_acquire_eeprom(struct wm_softc *sc)
3447 {
3448 uint32_t reg;
3449 int x;
3450 int ret = 0;
3451
3452 	/* Flash-based parts need no handshake; always succeeds. */
3453 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3454 return 0;
3455
3456 if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
3457 ret = wm_get_swfwhw_semaphore(sc);
3458 } else if (sc->sc_flags & WM_F_SWFW_SYNC) {
3459 /* this will also do wm_get_swsm_semaphore() if needed */
3460 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3461 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3462 ret = wm_get_swsm_semaphore(sc);
3463 }
3464
3465 if (ret)
3466 return 1;
3467
3468 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3469 reg = CSR_READ(sc, WMREG_EECD);
3470
3471 /* Request EEPROM access. */
3472 reg |= EECD_EE_REQ;
3473 CSR_WRITE(sc, WMREG_EECD, reg);
3474
3475 		/* ...and wait for it to be granted. */
3476 for (x = 0; x < 1000; x++) {
3477 reg = CSR_READ(sc, WMREG_EECD);
3478 if (reg & EECD_EE_GNT)
3479 break;
3480 delay(5);
3481 }
3482 if ((reg & EECD_EE_GNT) == 0) {
3483 aprint_error("%s: could not acquire EEPROM GNT\n",
3484 sc->sc_dev.dv_xname);
3485 reg &= ~EECD_EE_REQ;
3486 CSR_WRITE(sc, WMREG_EECD, reg);
3487 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3488 wm_put_swfwhw_semaphore(sc);
3489 if (sc->sc_flags & WM_F_SWFW_SYNC)
3490 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3491 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3492 wm_put_swsm_semaphore(sc);
3493 return (1);
3494 }
3495 }
3496
3497 return (0);
3498 }
3499
3500 /*
3501 * wm_release_eeprom:
3502 *
3503 * Release the EEPROM mutex.
3504 */
3505 static void
3506 wm_release_eeprom(struct wm_softc *sc)
3507 {
3508 uint32_t reg;
3509
3510 	/* Flash-based parts need no handshake; nothing to release. */
3511 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3512 return;
3513
3514 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3515 reg = CSR_READ(sc, WMREG_EECD);
3516 reg &= ~EECD_EE_REQ;
3517 CSR_WRITE(sc, WMREG_EECD, reg);
3518 }
3519
3520 if (sc->sc_flags & WM_F_SWFWHW_SYNC)
3521 wm_put_swfwhw_semaphore(sc);
3522 if (sc->sc_flags & WM_F_SWFW_SYNC)
3523 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3524 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3525 wm_put_swsm_semaphore(sc);
3526 }
3527
3528 /*
3529 * wm_eeprom_sendbits:
3530 *
3531 * Send a series of bits to the EEPROM.
3532 */
3533 static void
3534 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3535 {
3536 uint32_t reg;
3537 int x;
3538
3539 reg = CSR_READ(sc, WMREG_EECD);
3540
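	/*
	 * Bit-bang MSB first: put each bit on DI, then pulse SK high
	 * and low with a 2 us settle around each edge; at ~6 us per
	 * bit, a full 16-bit word costs on the order of 100 us.
	 */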
3541 for (x = nbits; x > 0; x--) {
3542 if (bits & (1U << (x - 1)))
3543 reg |= EECD_DI;
3544 else
3545 reg &= ~EECD_DI;
3546 CSR_WRITE(sc, WMREG_EECD, reg);
3547 delay(2);
3548 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3549 delay(2);
3550 CSR_WRITE(sc, WMREG_EECD, reg);
3551 delay(2);
3552 }
3553 }
3554
3555 /*
3556 * wm_eeprom_recvbits:
3557 *
3558 * Receive a series of bits from the EEPROM.
3559 */
3560 static void
3561 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3562 {
3563 uint32_t reg, val;
3564 int x;
3565
3566 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3567
3568 val = 0;
3569 for (x = nbits; x > 0; x--) {
3570 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3571 delay(2);
3572 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3573 val |= (1U << (x - 1));
3574 CSR_WRITE(sc, WMREG_EECD, reg);
3575 delay(2);
3576 }
3577 *valp = val;
3578 }
3579
3580 /*
3581 * wm_read_eeprom_uwire:
3582 *
3583 * Read a word from the EEPROM using the MicroWire protocol.
3584 */
3585 static int
3586 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3587 {
3588 uint32_t reg, val;
3589 int i;
3590
3591 for (i = 0; i < wordcnt; i++) {
3592 /* Clear SK and DI. */
3593 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3594 CSR_WRITE(sc, WMREG_EECD, reg);
3595
3596 /* Set CHIP SELECT. */
3597 reg |= EECD_CS;
3598 CSR_WRITE(sc, WMREG_EECD, reg);
3599 delay(2);
3600
3601 /* Shift in the READ command. */
3602 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3603
3604 /* Shift in address. */
3605 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3606
3607 /* Shift out the data. */
3608 wm_eeprom_recvbits(sc, &val, 16);
3609 data[i] = val & 0xffff;
3610
3611 /* Clear CHIP SELECT. */
3612 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3613 CSR_WRITE(sc, WMREG_EECD, reg);
3614 delay(2);
3615 }
3616
3617 return (0);
3618 }
3619
3620 /*
3621 * wm_spi_eeprom_ready:
3622 *
3623 * Wait for a SPI EEPROM to be ready for commands.
3624 */
3625 static int
3626 wm_spi_eeprom_ready(struct wm_softc *sc)
3627 {
3628 uint32_t val;
3629 int usec;
3630
3631 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3632 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3633 wm_eeprom_recvbits(sc, &val, 8);
3634 if ((val & SPI_SR_RDY) == 0)
3635 break;
3636 }
3637 if (usec >= SPI_MAX_RETRIES) {
3638 aprint_error("%s: EEPROM failed to become ready\n",
3639 sc->sc_dev.dv_xname);
3640 return (1);
3641 }
3642 return (0);
3643 }
3644
3645 /*
3646 * wm_read_eeprom_spi:
3647 *
3648  *	Read a word from the EEPROM using the SPI protocol.
3649 */
3650 static int
3651 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3652 {
3653 uint32_t reg, val;
3654 int i;
3655 uint8_t opc;
3656
3657 /* Clear SK and CS. */
3658 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3659 CSR_WRITE(sc, WMREG_EECD, reg);
3660 delay(2);
3661
3662 if (wm_spi_eeprom_ready(sc))
3663 return (1);
3664
3665 /* Toggle CS to flush commands. */
3666 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3667 delay(2);
3668 CSR_WRITE(sc, WMREG_EECD, reg);
3669 delay(2);
3670
3671 opc = SPI_OPC_READ;
3672 if (sc->sc_ee_addrbits == 8 && word >= 128)
3673 opc |= SPI_OPC_A8;
3674
3675 wm_eeprom_sendbits(sc, opc, 8);
3676 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3677
3678 for (i = 0; i < wordcnt; i++) {
3679 wm_eeprom_recvbits(sc, &val, 16);
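		/*
		 * The part clocks out the lower-addressed byte of each word
		 * first; fold the two bytes into the little-endian word
		 * order the driver expects.
		 */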
3680 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3681 }
3682
3683 /* Raise CS and clear SK. */
3684 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3685 CSR_WRITE(sc, WMREG_EECD, reg);
3686 delay(2);
3687
3688 return (0);
3689 }
3690
3691 #define EEPROM_CHECKSUM 0xBABA
3692 #define EEPROM_SIZE 0x0040
3693
3694 /*
3695  * wm_validate_eeprom_checksum:
3696  *
3697  *	The checksum is defined as the sum of the first 64 16-bit words.
3698 */
3699 static int
3700 wm_validate_eeprom_checksum(struct wm_softc *sc)
3701 {
3702 uint16_t checksum;
3703 uint16_t eeprom_data;
3704 int i;
3705
3706 checksum = 0;
3707
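/*
 * Sum words 0x00-0x3f modulo 2^16; the vendor programs the checksum
 * word so that the total comes out to EEPROM_CHECKSUM (0xBABA).
 */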
3708 for (i = 0; i < EEPROM_SIZE; i++) {
3709 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3710 return 1;
3711 checksum += eeprom_data;
3712 }
3713
3714 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3715 return 1;
3716
3717 return 0;
3718 }
3719
3720 /*
3721 * wm_read_eeprom:
3722 *
3723 * Read data from the serial EEPROM.
3724 */
3725 static int
3726 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3727 {
3728 int rv;
3729
3730 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3731 return 1;
3732
3733 if (wm_acquire_eeprom(sc))
3734 return 1;
3735
3736 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3737 rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
3738 else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3739 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3740 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3741 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3742 else
3743 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3744
3745 wm_release_eeprom(sc);
3746 return rv;
3747 }
3748
3749 static int
3750 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3751 uint16_t *data)
3752 {
3753 int i, eerd = 0;
3754 int error = 0;
3755
3756 for (i = 0; i < wordcnt; i++) {
3757 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3758
3759 CSR_WRITE(sc, WMREG_EERD, eerd);
3760 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3761 if (error != 0)
3762 break;
3763
3764 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3765 }
3766
3767 return error;
3768 }
3769
3770 static int
3771 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3772 {
3773 uint32_t attempts = 100000;
3774 uint32_t i, reg = 0;
3775 int32_t done = -1;
3776
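/* Poll for completion: up to 100000 iterations of 5us each (500ms). */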
3777 for (i = 0; i < attempts; i++) {
3778 reg = CSR_READ(sc, rw);
3779
3780 if (reg & EERD_DONE) {
3781 done = 0;
3782 break;
3783 }
3784 delay(5);
3785 }
3786
3787 return done;
3788 }
3789
3790 /*
3791 * wm_add_rxbuf:
3792 *
3793  *	Add a receive buffer to the indicated descriptor.
3794 */
3795 static int
3796 wm_add_rxbuf(struct wm_softc *sc, int idx)
3797 {
3798 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3799 struct mbuf *m;
3800 int error;
3801
3802 MGETHDR(m, M_DONTWAIT, MT_DATA);
3803 if (m == NULL)
3804 return (ENOBUFS);
3805
3806 MCLGET(m, M_DONTWAIT);
3807 if ((m->m_flags & M_EXT) == 0) {
3808 m_freem(m);
3809 return (ENOBUFS);
3810 }
3811
3812 if (rxs->rxs_mbuf != NULL)
3813 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3814
3815 rxs->rxs_mbuf = m;
3816
3817 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3818 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3819 BUS_DMA_READ|BUS_DMA_NOWAIT);
3820 if (error) {
3821 /* XXX XXX XXX */
3822 printf("%s: unable to load rx DMA map %d, error = %d\n",
3823 sc->sc_dev.dv_xname, idx, error);
3824 panic("wm_add_rxbuf");
3825 }
3826
3827 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3828 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3829
3830 WM_INIT_RXDESC(sc, idx);
3831
3832 return (0);
3833 }
3834
3835 /*
3836 * wm_set_ral:
3837 *
3838  *	Set an entry in the receive address list.
3839 */
3840 static void
3841 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3842 {
3843 uint32_t ral_lo, ral_hi;
3844
3845 if (enaddr != NULL) {
3846 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3847 (enaddr[3] << 24);
3848 ral_hi = enaddr[4] | (enaddr[5] << 8);
3849 ral_hi |= RAL_AV;
3850 } else {
3851 ral_lo = 0;
3852 ral_hi = 0;
3853 }
3854
3855 if (sc->sc_type >= WM_T_82544) {
3856 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3857 ral_lo);
3858 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3859 ral_hi);
3860 } else {
3861 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3862 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3863 }
3864 }
3865
3866 /*
3867 * wm_mchash:
3868 *
3869 * Compute the hash of the multicast address for the 4096-bit
3870 * multicast filter.
3871 */
3872 static uint32_t
3873 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3874 {
3875 static const int lo_shift[4] = { 4, 3, 2, 0 };
3876 static const int hi_shift[4] = { 4, 5, 6, 8 };
3877 static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3878 static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3879 uint32_t hash;
3880
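/*
 * The hash is built from bits of the two high-order bytes of the
 * address; sc_mchash_type selects which bit window is used, giving
 * a 10-bit index on ICH8 and a 12-bit index elsewhere.
 */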
3881 if (sc->sc_type == WM_T_ICH8) {
3882 hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3883 (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3884 return (hash & 0x3ff);
3885 }
3886 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3887 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3888
3889 return (hash & 0xfff);
3890 }
3891
3892 /*
3893 * wm_set_filter:
3894 *
3895 * Set up the receive filter.
3896 */
3897 static void
3898 wm_set_filter(struct wm_softc *sc)
3899 {
3900 struct ethercom *ec = &sc->sc_ethercom;
3901 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3902 struct ether_multi *enm;
3903 struct ether_multistep step;
3904 bus_addr_t mta_reg;
3905 uint32_t hash, reg, bit;
3906 int i, size;
3907
3908 if (sc->sc_type >= WM_T_82544)
3909 mta_reg = WMREG_CORDOVA_MTA;
3910 else
3911 mta_reg = WMREG_MTA;
3912
3913 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3914
3915 if (ifp->if_flags & IFF_BROADCAST)
3916 sc->sc_rctl |= RCTL_BAM;
3917 if (ifp->if_flags & IFF_PROMISC) {
3918 sc->sc_rctl |= RCTL_UPE;
3919 goto allmulti;
3920 }
3921
3922 /*
3923 * Set the station address in the first RAL slot, and
3924 * clear the remaining slots.
3925 */
3926 if (sc->sc_type == WM_T_ICH8)
3927 size = WM_ICH8_RAL_TABSIZE;
3928 else
3929 size = WM_RAL_TABSIZE;
3930 wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3931 for (i = 1; i < size; i++)
3932 wm_set_ral(sc, NULL, i);
3933
3934 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3935 size = WM_ICH8_MC_TABSIZE;
3936 else
3937 size = WM_MC_TABSIZE;
3938 /* Clear out the multicast table. */
3939 for (i = 0; i < size; i++)
3940 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3941
3942 ETHER_FIRST_MULTI(step, ec, enm);
3943 while (enm != NULL) {
3944 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3945 /*
3946 * We must listen to a range of multicast addresses.
3947 * For now, just accept all multicasts, rather than
3948 * trying to set only those filter bits needed to match
3949 * the range. (At this time, the only use of address
3950 * ranges is for IP multicast routing, for which the
3951 * range is big enough to require all bits set.)
3952 */
3953 goto allmulti;
3954 }
3955
3956 hash = wm_mchash(sc, enm->enm_addrlo);
3957
3958 reg = (hash >> 5);
3959 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
3960 reg &= 0x1f;
3961 else
3962 reg &= 0x7f;
3963 bit = hash & 0x1f;
3964
3965 hash = CSR_READ(sc, mta_reg + (reg << 2));
3966 hash |= 1U << bit;
3967
3968 		/* XXX Hardware bug? On the 82544, a write to an odd MTA
 		 * offset can be lost unless the previous register is
 		 * rewritten afterwards. */
3969 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3970 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3971 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3972 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3973 } else
3974 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3975
3976 ETHER_NEXT_MULTI(step, enm);
3977 }
3978
3979 ifp->if_flags &= ~IFF_ALLMULTI;
3980 goto setit;
3981
3982 allmulti:
3983 ifp->if_flags |= IFF_ALLMULTI;
3984 sc->sc_rctl |= RCTL_MPE;
3985
3986 setit:
3987 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3988 }
3989
3990 /*
3991 * wm_tbi_mediainit:
3992 *
3993 * Initialize media for use on 1000BASE-X devices.
3994 */
3995 static void
3996 wm_tbi_mediainit(struct wm_softc *sc)
3997 {
3998 const char *sep = "";
3999
4000 if (sc->sc_type < WM_T_82543)
4001 sc->sc_tipg = TIPG_WM_DFLT;
4002 else
4003 sc->sc_tipg = TIPG_LG_DFLT;
4004
4005 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
4006 wm_tbi_mediastatus);
4007
4008 /*
4009 * SWD Pins:
4010 *
4011 * 0 = Link LED (output)
4012 * 1 = Loss Of Signal (input)
4013 */
4014 sc->sc_ctrl |= CTRL_SWDPIO(0);
4015 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
4016
4017 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4018
4019 #define ADD(ss, mm, dd) \
4020 do { \
4021 aprint_normal("%s%s", sep, ss); \
4022 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
4023 sep = ", "; \
4024 } while (/*CONSTCOND*/0)
4025
4026 aprint_normal("%s: ", sc->sc_dev.dv_xname);
4027 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
4028 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
4029 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
4030 aprint_normal("\n");
4031
4032 #undef ADD
4033
4034 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4035 }
4036
4037 /*
4038 * wm_tbi_mediastatus: [ifmedia interface function]
4039 *
4040 * Get the current interface media status on a 1000BASE-X device.
4041 */
4042 static void
4043 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4044 {
4045 struct wm_softc *sc = ifp->if_softc;
4046 uint32_t ctrl;
4047
4048 ifmr->ifm_status = IFM_AVALID;
4049 ifmr->ifm_active = IFM_ETHER;
4050
4051 if (sc->sc_tbi_linkup == 0) {
4052 ifmr->ifm_active |= IFM_NONE;
4053 return;
4054 }
4055
4056 ifmr->ifm_status |= IFM_ACTIVE;
4057 ifmr->ifm_active |= IFM_1000_SX;
4058 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
4059 ifmr->ifm_active |= IFM_FDX;
4060 ctrl = CSR_READ(sc, WMREG_CTRL);
4061 if (ctrl & CTRL_RFCE)
4062 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
4063 if (ctrl & CTRL_TFCE)
4064 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
4065 }
4066
4067 /*
4068 * wm_tbi_mediachange: [ifmedia interface function]
4069 *
4070 * Set hardware to newly-selected media on a 1000BASE-X device.
4071 */
4072 static int
4073 wm_tbi_mediachange(struct ifnet *ifp)
4074 {
4075 struct wm_softc *sc = ifp->if_softc;
4076 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4077 uint32_t status;
4078 int i;
4079
4080 sc->sc_txcw = ife->ifm_data;
4081 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x on entry\n",
4082 sc->sc_dev.dv_xname,sc->sc_txcw));
4083 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
4084 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
4085 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
4086 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4087 sc->sc_txcw |= TXCW_ANE;
4088 } else {
4089 		/* If autonegotiation is turned off, force link up and turn on full duplex. */
4090 sc->sc_txcw &= ~TXCW_ANE;
4091 sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
4092 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4093 delay(1000);
4094 }
4095
4096 DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
4097 sc->sc_dev.dv_xname,sc->sc_txcw));
4098 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
4099 delay(10000);
4100
4101 /* NOTE: CTRL will update TFCE and RFCE automatically. */
4102
4103 sc->sc_tbi_anstate = 0;
4104
4105 i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
4106 DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", sc->sc_dev.dv_xname,i));
4107
4108 /*
4109 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
4110 * optics detect a signal, 0 if they don't.
4111 */
4112 if (((i != 0) && (sc->sc_type >= WM_T_82544)) || (i == 0)) {
4113 /* Have signal; wait for the link to come up. */
4114
4115 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
4116 /*
4117 * Reset the link, and let autonegotiation do its thing
4118 */
4119 sc->sc_ctrl |= CTRL_LRST;
4120 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4121 delay(1000);
4122 sc->sc_ctrl &= ~CTRL_LRST;
4123 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4124 delay(1000);
4125 }
4126
4127 for (i = 0; i < 50; i++) {
4128 delay(10000);
4129 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
4130 break;
4131 }
4132
4133 DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
4134 sc->sc_dev.dv_xname,i));
4135
4136 status = CSR_READ(sc, WMREG_STATUS);
4137 DPRINTF(WM_DEBUG_LINK,
4138 ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
4139 sc->sc_dev.dv_xname,status, STATUS_LU));
4140 if (status & STATUS_LU) {
4141 /* Link is up. */
4142 DPRINTF(WM_DEBUG_LINK,
4143 ("%s: LINK: set media -> link up %s\n",
4144 sc->sc_dev.dv_xname,
4145 (status & STATUS_FD) ? "FDX" : "HDX"));
4146 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4147 sc->sc_fcrtl &= ~FCRTL_XONE;
4148 if (status & STATUS_FD)
4149 sc->sc_tctl |=
4150 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4151 else
4152 sc->sc_tctl |=
4153 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4154 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
4155 sc->sc_fcrtl |= FCRTL_XONE;
4156 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4157 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4158 WMREG_OLD_FCRTL : WMREG_FCRTL,
4159 sc->sc_fcrtl);
4160 sc->sc_tbi_linkup = 1;
4161 } else {
4162 /* Link is down. */
4163 DPRINTF(WM_DEBUG_LINK,
4164 ("%s: LINK: set media -> link down\n",
4165 sc->sc_dev.dv_xname));
4166 sc->sc_tbi_linkup = 0;
4167 }
4168 } else {
4169 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
4170 sc->sc_dev.dv_xname));
4171 sc->sc_tbi_linkup = 0;
4172 }
4173
4174 wm_tbi_set_linkled(sc);
4175
4176 return (0);
4177 }
4178
4179 /*
4180 * wm_tbi_set_linkled:
4181 *
4182 * Update the link LED on 1000BASE-X devices.
4183 */
4184 static void
4185 wm_tbi_set_linkled(struct wm_softc *sc)
4186 {
4187
4188 if (sc->sc_tbi_linkup)
4189 sc->sc_ctrl |= CTRL_SWDPIN(0);
4190 else
4191 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
4192
4193 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4194 }
4195
4196 /*
4197 * wm_tbi_check_link:
4198 *
4199 * Check the link on 1000BASE-X devices.
4200 */
4201 static void
4202 wm_tbi_check_link(struct wm_softc *sc)
4203 {
4204 uint32_t rxcw, ctrl, status;
4205
4206 if (sc->sc_tbi_anstate == 0)
4207 return;
4208 else if (sc->sc_tbi_anstate > 1) {
4209 DPRINTF(WM_DEBUG_LINK,
4210 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
4211 sc->sc_tbi_anstate));
4212 sc->sc_tbi_anstate--;
4213 return;
4214 }
4215
4216 sc->sc_tbi_anstate = 0;
4217
4218 rxcw = CSR_READ(sc, WMREG_RXCW);
4219 ctrl = CSR_READ(sc, WMREG_CTRL);
4220 status = CSR_READ(sc, WMREG_STATUS);
4221
4222 if ((status & STATUS_LU) == 0) {
4223 DPRINTF(WM_DEBUG_LINK,
4224 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
4225 sc->sc_tbi_linkup = 0;
4226 } else {
4227 DPRINTF(WM_DEBUG_LINK,
4228 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
4229 (status & STATUS_FD) ? "FDX" : "HDX"));
4230 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4231 sc->sc_fcrtl &= ~FCRTL_XONE;
4232 if (status & STATUS_FD)
4233 sc->sc_tctl |=
4234 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4235 else
4236 sc->sc_tctl |=
4237 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4238 if (ctrl & CTRL_TFCE)
4239 sc->sc_fcrtl |= FCRTL_XONE;
4240 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4241 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
4242 WMREG_OLD_FCRTL : WMREG_FCRTL,
4243 sc->sc_fcrtl);
4244 sc->sc_tbi_linkup = 1;
4245 }
4246
4247 wm_tbi_set_linkled(sc);
4248 }
4249
4250 /*
4251 * wm_gmii_reset:
4252 *
4253 * Reset the PHY.
4254 */
4255 static void
4256 wm_gmii_reset(struct wm_softc *sc)
4257 {
4258 uint32_t reg;
4259 int func = 0; /* XXX gcc */
4260
4261 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)) {
4262 if (wm_get_swfwhw_semaphore(sc))
4263 return;
4264 }
4265 if (sc->sc_type == WM_T_80003) {
4266 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
4267 if (wm_get_swfw_semaphore(sc,
4268 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4269 return;
4270 }
4271 if (sc->sc_type >= WM_T_82544) {
4272 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
4273 delay(20000);
4274
4275 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4276 delay(20000);
4277 } else {
4278 /*
4279 * With 82543, we need to force speed and duplex on the MAC
4280 * equal to what the PHY speed and duplex configuration is.
4281 * In addition, we need to perform a hardware reset on the PHY
4282 * to take it out of reset.
4283 */
4284 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4285 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4286
4287 /* The PHY reset pin is active-low. */
4288 reg = CSR_READ(sc, WMREG_CTRL_EXT);
4289 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
4290 CTRL_EXT_SWDPIN(4));
4291 reg |= CTRL_EXT_SWDPIO(4);
4292
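/*
 * Toggle the active-low reset line via SWDPIN(4): deassert briefly,
 * assert (drive low) for 10ms to reset the PHY, then deassert again.
 */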
4293 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4294 delay(10);
4295
4296 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4297 delay(10000);
4298
4299 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
4300 delay(10);
4301 #if 0
4302 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
4303 #endif
4304 }
4305 if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9))
4306 wm_put_swfwhw_semaphore(sc);
4307 if (sc->sc_type == WM_T_80003)
4308 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4309 }
4310
4311 /*
4312 * wm_gmii_mediainit:
4313 *
4314 * Initialize media for use on 1000BASE-T devices.
4315 */
4316 static void
4317 wm_gmii_mediainit(struct wm_softc *sc)
4318 {
4319 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
4320
4321 /* We have MII. */
4322 sc->sc_flags |= WM_F_HAS_MII;
4323
4324 if (sc->sc_type >= WM_T_80003)
4325 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4326 else
4327 sc->sc_tipg = TIPG_1000T_DFLT;
4328
4329 /*
4330 * Let the chip set speed/duplex on its own based on
4331 * signals from the PHY.
4332 * XXXbouyer - I'm not sure this is right for the 80003,
4333 * the em driver only sets CTRL_SLU here - but it seems to work.
4334 */
4335 sc->sc_ctrl |= CTRL_SLU;
4336 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4337
4338 /* Initialize our media structures and probe the GMII. */
4339 sc->sc_mii.mii_ifp = ifp;
4340
4341 if (sc->sc_type >= WM_T_80003) {
4342 sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
4343 sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
4344 } else if (sc->sc_type >= WM_T_82544) {
4345 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
4346 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
4347 } else {
4348 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
4349 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
4350 }
4351 sc->sc_mii.mii_statchg = wm_gmii_statchg;
4352
4353 wm_gmii_reset(sc);
4354
4355 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
4356 wm_gmii_mediastatus);
4357
4358 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
4359 MII_OFFSET_ANY, MIIF_DOPAUSE);
4360 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
4361 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
4362 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
4363 } else
4364 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
4365 }
4366
4367 /*
4368 * wm_gmii_mediastatus: [ifmedia interface function]
4369 *
4370 * Get the current interface media status on a 1000BASE-T device.
4371 */
4372 static void
4373 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
4374 {
4375 struct wm_softc *sc = ifp->if_softc;
4376
4377 mii_pollstat(&sc->sc_mii);
4378 ifmr->ifm_status = sc->sc_mii.mii_media_status;
4379 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
4380 sc->sc_flowflags;
4381 }
4382
4383 /*
4384 * wm_gmii_mediachange: [ifmedia interface function]
4385 *
4386 * Set hardware to newly-selected media on a 1000BASE-T device.
4387 */
4388 static int
4389 wm_gmii_mediachange(struct ifnet *ifp)
4390 {
4391 struct wm_softc *sc = ifp->if_softc;
4392 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
4393
4394 if (ifp->if_flags & IFF_UP) {
4395 sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
4396 sc->sc_ctrl |= CTRL_SLU;
4397 if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
4398 || (sc->sc_type > WM_T_82543)) {
4399 sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
4400 } else {
4401 sc->sc_ctrl &= ~CTRL_ASDE;
4402 sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
4403 if (ife->ifm_media & IFM_FDX)
4404 sc->sc_ctrl |= CTRL_FD;
4405 			switch (IFM_SUBTYPE(ife->ifm_media)) {
4406 case IFM_10_T:
4407 sc->sc_ctrl |= CTRL_SPEED_10;
4408 break;
4409 case IFM_100_TX:
4410 sc->sc_ctrl |= CTRL_SPEED_100;
4411 break;
4412 case IFM_1000_T:
4413 sc->sc_ctrl |= CTRL_SPEED_1000;
4414 break;
4415 default:
4416 panic("wm_gmii_mediachange: bad media 0x%x",
4417 ife->ifm_media);
4418 }
4419 }
4420 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4421 if (sc->sc_type <= WM_T_82543)
4422 wm_gmii_reset(sc);
4423 mii_mediachg(&sc->sc_mii);
4424 }
4425 return (0);
4426 }
4427
4428 #define MDI_IO CTRL_SWDPIN(2)
4429 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
4430 #define MDI_CLK CTRL_SWDPIN(3)
4431
4432 static void
4433 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
4434 {
4435 uint32_t i, v;
4436
4437 v = CSR_READ(sc, WMREG_CTRL);
4438 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4439 v |= MDI_DIR | CTRL_SWDPIO(3);
4440
4441 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
4442 if (data & i)
4443 v |= MDI_IO;
4444 else
4445 v &= ~MDI_IO;
4446 CSR_WRITE(sc, WMREG_CTRL, v);
4447 delay(10);
4448 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4449 delay(10);
4450 CSR_WRITE(sc, WMREG_CTRL, v);
4451 delay(10);
4452 }
4453 }
4454
4455 static uint32_t
4456 i82543_mii_recvbits(struct wm_softc *sc)
4457 {
4458 uint32_t v, i, data = 0;
4459
4460 v = CSR_READ(sc, WMREG_CTRL);
4461 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
4462 v |= CTRL_SWDPIO(3);
4463
4464 CSR_WRITE(sc, WMREG_CTRL, v);
4465 delay(10);
4466 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4467 delay(10);
4468 CSR_WRITE(sc, WMREG_CTRL, v);
4469 delay(10);
4470
4471 for (i = 0; i < 16; i++) {
4472 data <<= 1;
4473 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4474 delay(10);
4475 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
4476 data |= 1;
4477 CSR_WRITE(sc, WMREG_CTRL, v);
4478 delay(10);
4479 }
4480
4481 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
4482 delay(10);
4483 CSR_WRITE(sc, WMREG_CTRL, v);
4484 delay(10);
4485
4486 return (data);
4487 }
4488
4489 #undef MDI_IO
4490 #undef MDI_DIR
4491 #undef MDI_CLK
4492
4493 /*
4494 * wm_gmii_i82543_readreg: [mii interface function]
4495 *
4496 * Read a PHY register on the GMII (i82543 version).
4497 */
4498 static int
4499 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
4500 {
4501 struct wm_softc *sc = (void *) self;
4502 int rv;
4503
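/*
 * Bit-bang an MDIO read frame: 32 bits of preamble (all ones), then
 * start, opcode, PHY address and register address (14 bits); the PHY
 * answers with 16 bits of data.
 */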
4504 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4505 i82543_mii_sendbits(sc, reg | (phy << 5) |
4506 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
4507 rv = i82543_mii_recvbits(sc) & 0xffff;
4508
4509 DPRINTF(WM_DEBUG_GMII,
4510 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
4511 sc->sc_dev.dv_xname, phy, reg, rv));
4512
4513 return (rv);
4514 }
4515
4516 /*
4517 * wm_gmii_i82543_writereg: [mii interface function]
4518 *
4519 * Write a PHY register on the GMII (i82543 version).
4520 */
4521 static void
4522 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
4523 {
4524 struct wm_softc *sc = (void *) self;
4525
4526 i82543_mii_sendbits(sc, 0xffffffffU, 32);
4527 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
4528 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
4529 (MII_COMMAND_START << 30), 32);
4530 }
4531
4532 /*
4533 * wm_gmii_i82544_readreg: [mii interface function]
4534 *
4535 * Read a PHY register on the GMII.
4536 */
4537 static int
4538 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
4539 {
4540 struct wm_softc *sc = (void *) self;
4541 uint32_t mdic = 0;
4542 int i, rv;
4543
4544 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
4545 MDIC_REGADD(reg));
4546
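/* Poll for MDIC_READY for up to 320 * 10us = 3.2ms. */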
4547 for (i = 0; i < 320; i++) {
4548 mdic = CSR_READ(sc, WMREG_MDIC);
4549 if (mdic & MDIC_READY)
4550 break;
4551 delay(10);
4552 }
4553
4554 if ((mdic & MDIC_READY) == 0) {
4555 log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
4556 sc->sc_dev.dv_xname, phy, reg);
4557 rv = 0;
4558 } else if (mdic & MDIC_E) {
4559 #if 0 /* This is normal if no PHY is present. */
4560 log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
4561 sc->sc_dev.dv_xname, phy, reg);
4562 #endif
4563 rv = 0;
4564 } else {
4565 rv = MDIC_DATA(mdic);
4566 if (rv == 0xffff)
4567 rv = 0;
4568 }
4569
4570 return (rv);
4571 }
4572
4573 /*
4574 * wm_gmii_i82544_writereg: [mii interface function]
4575 *
4576 * Write a PHY register on the GMII.
4577 */
4578 static void
4579 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
4580 {
4581 struct wm_softc *sc = (void *) self;
4582 uint32_t mdic = 0;
4583 int i;
4584
4585 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
4586 MDIC_REGADD(reg) | MDIC_DATA(val));
4587
4588 for (i = 0; i < 320; i++) {
4589 mdic = CSR_READ(sc, WMREG_MDIC);
4590 if (mdic & MDIC_READY)
4591 break;
4592 delay(10);
4593 }
4594
4595 if ((mdic & MDIC_READY) == 0)
4596 log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
4597 sc->sc_dev.dv_xname, phy, reg);
4598 else if (mdic & MDIC_E)
4599 log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
4600 sc->sc_dev.dv_xname, phy, reg);
4601 }
4602
4603 /*
4604 * wm_gmii_i80003_readreg: [mii interface function]
4605 *
4606  *	Read a PHY register on the Kumeran interface.
4607  * This could be handled by the PHY layer if we didn't have to lock
4608  * the resource ...
4609 */
4610 static int
4611 wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
4612 {
4613 struct wm_softc *sc = (void *) self;
4614 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4615 int rv;
4616
4617 if (phy != 1) /* only one PHY on kumeran bus */
4618 return 0;
4619
4620 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4621 return 0;
4622
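/*
 * GG82563 registers are paged: the page number lives in the upper
 * bits of 'reg' and must be written to the page-select register
 * first. Registers at or above GG82563_MIN_ALT_REG use an alternate
 * page-select register.
 */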
4623 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4624 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4625 reg >> GG82563_PAGE_SHIFT);
4626 } else {
4627 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4628 reg >> GG82563_PAGE_SHIFT);
4629 }
4630
4631 rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
4632 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4633 return (rv);
4634 }
4635
4636 /*
4637 * wm_gmii_i80003_writereg: [mii interface function]
4638 *
4639  *	Write a PHY register on the Kumeran interface.
4640  * This could be handled by the PHY layer if we didn't have to lock
4641  * the resource ...
4642 */
4643 static void
4644 wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
4645 {
4646 struct wm_softc *sc = (void *) self;
4647 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4648
4649 if (phy != 1) /* only one PHY on kumeran bus */
4650 return;
4651
4652 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4653 return;
4654
4655 if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
4656 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
4657 reg >> GG82563_PAGE_SHIFT);
4658 } else {
4659 wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
4660 reg >> GG82563_PAGE_SHIFT);
4661 }
4662
4663 wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
4664 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4665 }
4666
4667 /*
4668 * wm_gmii_statchg: [mii interface function]
4669 *
4670 * Callback from MII layer when media changes.
4671 */
4672 static void
4673 wm_gmii_statchg(struct device *self)
4674 {
4675 struct wm_softc *sc = (void *) self;
4676 struct mii_data *mii = &sc->sc_mii;
4677
4678 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
4679 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
4680 sc->sc_fcrtl &= ~FCRTL_XONE;
4681
4682 /*
4683 * Get flow control negotiation result.
4684 */
4685 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
4686 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
4687 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
4688 mii->mii_media_active &= ~IFM_ETH_FMASK;
4689 }
4690
4691 if (sc->sc_flowflags & IFM_FLOW) {
4692 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
4693 sc->sc_ctrl |= CTRL_TFCE;
4694 sc->sc_fcrtl |= FCRTL_XONE;
4695 }
4696 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
4697 sc->sc_ctrl |= CTRL_RFCE;
4698 }
4699
4700 if (sc->sc_mii.mii_media_active & IFM_FDX) {
4701 DPRINTF(WM_DEBUG_LINK,
4702 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
4703 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4704 } else {
4705 DPRINTF(WM_DEBUG_LINK,
4706 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
4707 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
4708 }
4709
4710 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4711 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4712 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
4713 : WMREG_FCRTL, sc->sc_fcrtl);
4714 if (sc->sc_type >= WM_T_80003) {
4715 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
4716 case IFM_1000_T:
4717 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4718 KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
4719 sc->sc_tipg = TIPG_1000T_80003_DFLT;
4720 break;
4721 default:
4722 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
4723 KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
4724 sc->sc_tipg = TIPG_10_100_80003_DFLT;
4725 break;
4726 }
4727 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4728 }
4729 }
4730
4731 /*
4732 * wm_kmrn_i80003_readreg:
4733 *
4734  *	Read a Kumeran register.
4735 */
4736 static int
4737 wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
4738 {
4739 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4740 int rv;
4741
4742 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4743 return 0;
4744
4745 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4746 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4747 KUMCTRLSTA_REN);
4748 delay(2);
4749
4750 rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
4751 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4752 return (rv);
4753 }
4754
4755 /*
4756 * wm_kmrn_i80003_writereg:
4757 *
4758  *	Write a Kumeran register.
4759 */
4760 static void
4761 wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
4762 {
4763 int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
4764
4765 if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
4766 return;
4767
4768 CSR_WRITE(sc, WMREG_KUMCTRLSTA,
4769 ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
4770 (val & KUMCTRLSTA_MASK));
4771 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
4772 }
4773
4774 static int
4775 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
4776 {
4777 uint32_t eecd = 0;
4778
4779 if (sc->sc_type == WM_T_82573) {
4780 eecd = CSR_READ(sc, WMREG_EECD);
4781
4782 /* Isolate bits 15 & 16 */
4783 eecd = ((eecd >> 15) & 0x03);
4784
4785 /* If both bits are set, device is Flash type */
4786 if (eecd == 0x03) {
4787 return 0;
4788 }
4789 }
4790 return 1;
4791 }
4792
4793 static int
4794 wm_get_swsm_semaphore(struct wm_softc *sc)
4795 {
4796 int32_t timeout;
4797 uint32_t swsm;
4798
4799 /* Get the FW semaphore. */
4800 timeout = 1000 + 1; /* XXX */
4801 while (timeout) {
4802 swsm = CSR_READ(sc, WMREG_SWSM);
4803 swsm |= SWSM_SWESMBI;
4804 CSR_WRITE(sc, WMREG_SWSM, swsm);
4805 		/* If we managed to set the bit, we got the semaphore. */
4806 swsm = CSR_READ(sc, WMREG_SWSM);
4807 if (swsm & SWSM_SWESMBI)
4808 break;
4809
4810 delay(50);
4811 timeout--;
4812 }
4813
4814 if (timeout == 0) {
4815 aprint_error("%s: could not acquire EEPROM GNT\n",
4816 sc->sc_dev.dv_xname);
4817 /* Release semaphores */
4818 wm_put_swsm_semaphore(sc);
4819 return 1;
4820 }
4821 return 0;
4822 }
4823
4824 static void
4825 wm_put_swsm_semaphore(struct wm_softc *sc)
4826 {
4827 uint32_t swsm;
4828
4829 swsm = CSR_READ(sc, WMREG_SWSM);
4830 swsm &= ~(SWSM_SWESMBI);
4831 CSR_WRITE(sc, WMREG_SWSM, swsm);
4832 }
4833
4834 static int
4835 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4836 {
4837 uint32_t swfw_sync;
4838 uint32_t swmask = mask << SWFW_SOFT_SHIFT;
4839 uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
4840 	int timeout;
4841
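/*
 * Each shared resource has a software-owned bit and a firmware-owned
 * bit in SW_FW_SYNC; we may claim the resource only while both are
 * clear. Access to SW_FW_SYNC itself is guarded by the SWSM
 * semaphore on devices that have it.
 */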
4842 	for (timeout = 0; timeout < 200; timeout++) {
4843 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4844 if (wm_get_swsm_semaphore(sc))
4845 return 1;
4846 }
4847 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4848 if ((swfw_sync & (swmask | fwmask)) == 0) {
4849 swfw_sync |= swmask;
4850 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4851 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4852 wm_put_swsm_semaphore(sc);
4853 return 0;
4854 }
4855 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4856 wm_put_swsm_semaphore(sc);
4857 delay(5000);
4858 }
4859 printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
4860 sc->sc_dev.dv_xname, mask, swfw_sync);
4861 return 1;
4862 }
4863
4864 static void
4865 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
4866 {
4867 uint32_t swfw_sync;
4868
4869 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
4870 while (wm_get_swsm_semaphore(sc) != 0)
4871 continue;
4872 }
4873 swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
4874 swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
4875 CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
4876 if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
4877 wm_put_swsm_semaphore(sc);
4878 }
4879
4880 static int
4881 wm_get_swfwhw_semaphore(struct wm_softc *sc)
4882 {
4883 uint32_t ext_ctrl;
4884 	int timeout;
4885
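/*
 * The SWFLAG bit reads back as set only once the semaphore has been
 * granted; retry for up to 200 * 5ms = 1s.
 */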
4886 	for (timeout = 0; timeout < 200; timeout++) {
4887 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4888 ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
4889 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4890
4891 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4892 if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
4893 return 0;
4894 delay(5000);
4895 }
4896 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
4897 sc->sc_dev.dv_xname, ext_ctrl);
4898 return 1;
4899 }
4900
4901 static void
4902 wm_put_swfwhw_semaphore(struct wm_softc *sc)
4903 {
4904 uint32_t ext_ctrl;
4905 ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
4906 ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
4907 CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
4908 }
4909
4910 /******************************************************************************
4911  * Reads a 16-bit word or words from the EEPROM using the ICH8's flash access
4912  * register.
4913  *
4914  * sc - Struct containing variables accessed by shared code
4915  * offset - offset of word in the EEPROM to read
4916  * words - number of words to read
4917  * data - words read from the EEPROM
4918 *****************************************************************************/
4919 static int
4920 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
4921 {
4922 int32_t error = 0;
4923 uint32_t flash_bank = 0;
4924 uint32_t act_offset = 0;
4925 uint32_t bank_offset = 0;
4926 uint16_t word = 0;
4927 uint16_t i = 0;
4928
4929 	/* We need to know which is the valid flash bank. Since we do not
4930 	 * keep a shadow copy of the EEPROM (eeprom_shadow_ram), the bank
4931 	 * in use cannot be cached and must be determined again on every
4932 	 * read.
4933 */
4934 /* Value of bit 22 corresponds to the flash bank we're on. */
4935 flash_bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
4936
4937 	/* Adjust the offset if we're on bank 1, accounting for word size. */
4938 bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
4939
4940 error = wm_get_swfwhw_semaphore(sc);
4941 if (error)
4942 return error;
4943
4944 for (i = 0; i < words; i++) {
4945 /* The NVM part needs a byte offset, hence * 2 */
4946 act_offset = bank_offset + ((offset + i) * 2);
4947 error = wm_read_ich8_word(sc, act_offset, &word);
4948 if (error)
4949 break;
4950 data[i] = word;
4951 }
4952
4953 wm_put_swfwhw_semaphore(sc);
4954 return error;
4955 }
4956
4957 /******************************************************************************
4958 * This function does initial flash setup so that a new read/write/erase cycle
4959 * can be started.
4960 *
4961 * sc - The pointer to the hw structure
4962 ****************************************************************************/
4963 static int32_t
4964 wm_ich8_cycle_init(struct wm_softc *sc)
4965 {
4966 uint16_t hsfsts;
4967 int32_t error = 1;
4968 int32_t i = 0;
4969
4970 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
4971
4972 	/* Bail out if the Flash Descriptor Valid bit is not set in HW status. */
4973 if ((hsfsts & HSFSTS_FLDVAL) == 0) {
4974 return error;
4975 }
4976
4977 	/* Clear FCERR and DAEL in HW status; both bits are
4978 	 * write-1-to-clear. */
4979 hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
4980
4981 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4982
4983 	/* Either we should have a hardware SPI cycle-in-progress bit to check
4984 	 * against in order to start a new cycle, or the FDONE bit should be
4985 	 * changed in the hardware so that it reads 1 after a hardware reset,
4986 	 * which could then be used to tell whether a cycle is in progress or
4987 	 * has completed. We should also have some software semaphore mechanism
4988 	 * guarding FDONE or the cycle-in-progress bit so that access by two
4989 	 * threads is serialized, or some other way to keep two threads from
4990 	 * starting a cycle at the same time. */
4991
4992 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
4993 /* There is no cycle running at present, so we can start a cycle */
4994 /* Begin by setting Flash Cycle Done. */
4995 hsfsts |= HSFSTS_DONE;
4996 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
4997 error = 0;
4998 } else {
4999 		/* Otherwise, poll for some time so the current cycle has a
5000 		 * chance to end before we give up. */
5001 for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
5002 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5003 if ((hsfsts & HSFSTS_FLINPRO) == 0) {
5004 error = 0;
5005 break;
5006 }
5007 delay(1);
5008 }
5009 if (error == 0) {
5010 			/* The previous cycle completed in time; now set the
5011 			 * Flash Cycle Done bit. */
5012 hsfsts |= HSFSTS_DONE;
5013 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
5014 }
5015 }
5016 return error;
5017 }
5018
5019 /******************************************************************************
5020 * This function starts a flash cycle and waits for its completion
5021 *
5022 * sc - The pointer to the hw structure
5023 ****************************************************************************/
5024 static int32_t
5025 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
5026 {
5027 uint16_t hsflctl;
5028 uint16_t hsfsts;
5029 int32_t error = 1;
5030 uint32_t i = 0;
5031
5032 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
5033 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5034 hsflctl |= HSFCTL_GO;
5035 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5036
5037 	/* Wait until the FDONE bit is set. */
5038 do {
5039 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5040 if (hsfsts & HSFSTS_DONE)
5041 break;
5042 delay(1);
5043 i++;
5044 } while (i < timeout);
5045 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0) {
5046 error = 0;
5047 }
5048 return error;
5049 }
5050
5051 /******************************************************************************
5052 * Reads a byte or word from the NVM using the ICH8 flash access registers.
5053 *
5054 * sc - The pointer to the hw structure
5055 * index - The index of the byte or word to read.
5056 * size - Size of data to read, 1=byte 2=word
5057 * data - Pointer to the word to store the value read.
5058 *****************************************************************************/
5059 static int32_t
5060 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
5061 uint32_t size, uint16_t* data)
5062 {
5063 uint16_t hsfsts;
5064 uint16_t hsflctl;
5065 uint32_t flash_linear_address;
5066 uint32_t flash_data = 0;
5067 int32_t error = 1;
5068 int32_t count = 0;
5069
5070 	if (size < 1 || size > 2 || data == NULL ||
5071 index > ICH_FLASH_LINEAR_ADDR_MASK)
5072 return error;
5073
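/*
 * The caller's index is relative to the NVM region, but FADDR wants
 * an offset from the start of the flash part, so mask the index to
 * 24 bits and add the region base.
 */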
5074 flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
5075 sc->sc_ich8_flash_base;
5076
5077 do {
5078 delay(1);
5079 /* Steps */
5080 error = wm_ich8_cycle_init(sc);
5081 if (error)
5082 break;
5083
5084 hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
5085 		/* A byte count of 0/1 selects a 1- or 2-byte transfer, respectively. */
5086 hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT) & HSFCTL_BCOUNT_MASK;
5087 hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
5088 ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
5089
5090 /* Write the last 24 bits of index into Flash Linear address field in
5091 * Flash Address */
5092 		/* TODO: maybe check the index against the size of the flash */
5093
5094 ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
5095
5096 error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
5097
5098 		/* If the cycle succeeded, read the result out of Flash Data0,
5099 		 * least significant byte first. If FCERR is set instead, clear
5100 		 * it and retry the whole sequence a few more times before giving up. */
5101 if (error == 0) {
5102 flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
5103 if (size == 1) {
5104 *data = (uint8_t)(flash_data & 0x000000FF);
5105 } else if (size == 2) {
5106 *data = (uint16_t)(flash_data & 0x0000FFFF);
5107 }
5108 break;
5109 } else {
5110 /* If we've gotten here, then things are probably completely hosed,
5111 * but if the error condition is detected, it won't hurt to give
5112 * it another try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
5113 */
5114 hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
5115 if (hsfsts & HSFSTS_ERR) {
5116 /* Repeat for some time before giving up. */
5117 continue;
5118 } else if ((hsfsts & HSFSTS_DONE) == 0) {
5119 break;
5120 }
5121 }
5122 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
5123
5124 return error;
5125 }
5126
5127 #if 0
5128 /******************************************************************************
5129 * Reads a single byte from the NVM using the ICH8 flash access registers.
5130 *
5131 * sc - pointer to wm_hw structure
5132 * index - The index of the byte to read.
5133 * data - Pointer to a byte to store the value read.
5134 *****************************************************************************/
5135 static int32_t
5136 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
5137 {
5138 int32_t status;
5139 uint16_t word = 0;
5140
5141 status = wm_read_ich8_data(sc, index, 1, &word);
5142 if (status == 0) {
5143 *data = (uint8_t)word;
5144 }
5145
5146 return status;
5147 }
5148 #endif
5149
5150 /******************************************************************************
5151 * Reads a word from the NVM using the ICH8 flash access registers.
5152 *
5153 * sc - pointer to wm_hw structure
5154 * index - The starting byte index of the word to read.
5155 * data - Pointer to a word to store the value read.
5156 *****************************************************************************/
5157 static int32_t
5158 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
5159 {
5160 int32_t status;
5161
5162 status = wm_read_ich8_data(sc, index, 2, data);
5163 return status;
5164 }
5165