/*	$NetBSD: if_wm.c,v 1.130 2006/11/16 06:07:54 yamt Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.130 2006/11/16 06:07:54 yamt Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK|WM_DEBUG_GMII;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
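
/*
 * Illustrative sketch (not part of the driver): the index macros above
 * wrap with a simple AND because both ring sizes are powers of two.
 * This hypothetical helper is an expansion of WM_NEXTTX() for the
 * 4096-entry ring, shown only to make the wrap-around arithmetic
 * explicit: (4095 + 1) & (4096 - 1) == 0.
 */
#if 0
static inline int
wm_nexttx_example(int x)
{

	/* Same arithmetic as WM_NEXTTX(); relies on the ring size
	 * being a power of two. */
	return ((x + 1) & (WM_NTXDESC_82544 - 1));
}
#endif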

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
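
/*
 * Illustrative sketch (not part of the driver): where the "5 Rx
 * buffers" figure above comes from.  This hypothetical helper assumes
 * a 9014-byte jumbo frame and 2K (MCLBYTES) buffers:
 * howmany(9014, 2048) == 5, and 256 / 5 leaves room for roughly 50
 * full-sized jumbo packets in the ring.
 */
#if 0
static inline int
wm_rxbufs_per_packet_example(int framelen)
{

	/* Number of MCLBYTES-sized buffers a frame of this size spans. */
	return (howmany(framelen, MCLBYTES));
}
#endif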

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
	WM_T_82571,			/* i82571 */
	WM_T_82572,			/* i82572 */
	WM_T_82573,			/* i82573 */
	WM_T_80003,			/* i80003 */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */
	void *sc_powerhook;		/* power hook */
	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	struct pci_conf_state sc_pciconf;

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int sc_txfifo_size;		/* Tx FIFO size */
	int sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int sc_txfifo_stall;		/* Tx FIFO is stalled */
	struct callout sc_txfifo_ch;	/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x0001	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x0002	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SEMAPHORE	0x0004	/* EEPROM with semaphore */
#define	WM_F_EEPROM_EERDEEWR	0x0008	/* EEPROM access via EERD/EEWR */
#define	WM_F_EEPROM_SPI		0x0010	/* EEPROM is SPI */
#define	WM_F_EEPROM_FLASH	0x0020	/* EEPROM is FLASH */
#define	WM_F_EEPROM_INVALID	0x0040	/* EEPROM not present (bad checksum) */
#define	WM_F_IOH_VALID		0x0080	/* I/O handle is valid */
#define	WM_F_BUS64		0x0100	/* bus is 64-bit */
#define	WM_F_PCIX		0x0200	/* bus is PCI-X */
#define	WM_F_CSA		0x0400	/* bus is CSA */
#define	WM_F_PCIE		0x0800	/* bus is PCI-Express */
#define	WM_F_SWFW_SYNC		0x1000	/* Software-Firmware synchronisation */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
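
/*
 * Illustrative sketch (not part of the driver): how sc_align_tweak is
 * expected to be chosen, per the note above.  This hypothetical helper
 * assumes the decision is made from the configured MTU: scoot by 2
 * bytes so the payload after the Ethernet header lands on a 4-byte
 * boundary, unless a full-sized frame would then overflow the
 * (2K - 2) bytes left in the buffer.
 */
#if 0
static inline int
wm_align_tweak_example(u_int mtu)
{

	if ((mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		return (0);	/* no room to scoot; headers get copied */
	return (2);		/* payload after Ethernet header aligned */
}
#endif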

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);
static void	wm_powerhook(int, void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static int	wm_gmii_i80003_readreg(struct device *, int, int);
static void	wm_gmii_i80003_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_i80003_readreg(struct wm_softc *, int);
static void	wm_kmrn_i80003_writereg(struct wm_softc *, int, int);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

static void	wm_82547_txfifo_stall(void *);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
683 "Intel i82547EI Moblie 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
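
/*
 * Illustrative usage sketch (not part of the driver): the I/O window
 * pair above works like a classic index/data port -- the register
 * offset goes through the window at offset 0 and the value through
 * the window at offset 4.  This hypothetical helper would have the
 * same effect as CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl), but via
 * I/O space instead of memory space.
 */
#if 0
static inline void
wm_io_write_ctrl_example(struct wm_softc *sc)
{

	/* Indirect write of the CTRL register through the I/O BAR. */
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
}
#endif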

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else {
			/*
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error("%s: WARNING: I/O BAR at zero.\n",
				    sc->sc_dev.dv_xname);
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error("%s: WARNING: unable to map "
				    "I/O space\n", sc->sc_dev.dv_xname);
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, sc,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error("%s: cannot activate %d\n", sc->sc_dev.dv_xname,
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch);
			callout_setfunc(&sc->sc_txfifo_ch,
			    wm_82547_txfifo_stall, sc);
			aprint_verbose("%s: using 82547 Tx FIFO stall "
			    "work-around\n", sc->sc_dev.dv_xname);
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE | WM_F_EEPROM_SEMAPHORE;
		aprint_verbose("%s: PCI-Express bus\n", sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
		    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/* clear interesting stat counters */
	CSR_READ(sc, WMREG_COLC);
	CSR_READ(sc, WMREG_RXERRC);

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type == WM_T_80003)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
	else if (sc->sc_type == WM_T_82573)
		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
	else if (sc->sc_type > WM_T_82544)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else if ((sc->sc_type == WM_T_82573) &&
	    (wm_is_onboard_nvm_eeprom(sc) == 0)) {
		sc->sc_flags |= WM_F_EEPROM_FLASH;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}

	/*
	 * Defer printing the EEPROM type until after verifying the checksum.
	 * This allows the EEPROM type to be printed correctly in the case
	 * that no EEPROM is attached.
	 */

	/*
	 * Validate the EEPROM checksum.  If the checksum fails, flag this for
	 * later, so we can fail future reads from the EEPROM.
	 */
	if (wm_validate_eeprom_checksum(sc))
		sc->sc_flags |= WM_F_EEPROM_INVALID;

	if (sc->sc_flags & WM_F_EEPROM_INVALID)
		aprint_verbose("%s: No EEPROM\n", sc->sc_dev.dv_xname);
	else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
		aprint_verbose("%s: FLASH\n", sc->sc_dev.dv_xname);
	} else {
		if (sc->sc_flags & WM_F_EEPROM_SPI)
			eetype = "SPI";
		else
			eetype = "MicroWire";
		aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
		    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
		    sc->sc_ee_addrbits, eetype);
	}

	/*
	 * Read the Ethernet address from the EEPROM, if not first found
	 * in device properties.
	 */
	ea = prop_dictionary_get(device_properties(&sc->sc_dev), "mac-addr");
	if (ea != NULL) {
		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
		    sizeof(myea) / sizeof(myea[0]), myea)) {
			aprint_error("%s: unable to read Ethernet address\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		enaddr[0] = myea[0] & 0xff;
		enaddr[1] = myea[0] >> 8;
		enaddr[2] = myea[1] & 0xff;
		enaddr[3] = myea[1] >> 8;
		enaddr[4] = myea[2] & 0xff;
		enaddr[5] = myea[2] >> 8;
	}

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the dual port controller.
	 */
	if (sc->sc_type == WM_T_82546 || sc->sc_type == WM_T_82546_3
	    || sc->sc_type == WM_T_82571 || sc->sc_type == WM_T_80003) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg1");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg1 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
			aprint_error("%s: unable to read CFG1\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	pn = prop_dictionary_get(device_properties(&sc->sc_dev),
	    "i82543-cfg2");
	if (pn != NULL) {
		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
		cfg2 = (uint16_t) prop_number_integer_value(pn);
	} else {
		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
			aprint_error("%s: unable to read CFG2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (sc->sc_type >= WM_T_82544) {
		pn = prop_dictionary_get(device_properties(&sc->sc_dev),
		    "i82543-swdpin");
		if (pn != NULL) {
			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
			swdpin = (uint16_t) prop_number_integer_value(pn);
		} else {
			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
				aprint_error("%s: unable to read SWDPIN\n",
				    sc->sc_dev.dv_xname);
				return;
			}
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	if (sc->sc_type != WM_T_82573)
		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
		    IFCAP_CSUM_TCPv6_Tx |
		    IFCAP_CSUM_UDPv6_Tx;
	}

	/*
	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
	 *
	 *	82541GI (8086:1076) ... no
	 *	82572EI (8086:10b9) ... yes
	 */
	if (sc->sc_type >= WM_T_82571) {
		ifp->if_capabilities |=
		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
	}

	/*
	 * If we're an i82544 or greater (except i82547), we can do
1420 * TCP segmentation offload.
1421 */
1422 if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547)
1423 ifp->if_capabilities |= IFCAP_TSOv4;
1424
1425 /*
1426 * Attach the interface.
1427 */
1428 if_attach(ifp);
1429 ether_ifattach(ifp, enaddr);
1430 #if NRND > 0
1431 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1432 RND_TYPE_NET, 0);
1433 #endif
1434
1435 #ifdef WM_EVENT_COUNTERS
1436 /* Attach event counters. */
1437 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1438 NULL, sc->sc_dev.dv_xname, "txsstall");
1439 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1440 NULL, sc->sc_dev.dv_xname, "txdstall");
1441 evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
1442 NULL, sc->sc_dev.dv_xname, "txfifo_stall");
1443 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1444 NULL, sc->sc_dev.dv_xname, "txdw");
1445 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1446 NULL, sc->sc_dev.dv_xname, "txqe");
1447 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1448 NULL, sc->sc_dev.dv_xname, "rxintr");
1449 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1450 NULL, sc->sc_dev.dv_xname, "linkintr");
1451
1452 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1453 NULL, sc->sc_dev.dv_xname, "rxipsum");
1454 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1455 NULL, sc->sc_dev.dv_xname, "rxtusum");
1456 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1457 NULL, sc->sc_dev.dv_xname, "txipsum");
1458 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1459 NULL, sc->sc_dev.dv_xname, "txtusum");
1460 evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
1461 NULL, sc->sc_dev.dv_xname, "txtusum6");
1462
1463 evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
1464 NULL, sc->sc_dev.dv_xname, "txtso");
1465 evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
1466 NULL, sc->sc_dev.dv_xname, "txtsopain");
1467
1468 for (i = 0; i < WM_NTXSEGS; i++) {
1469 sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
1470 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1471 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1472 }
1473
1474 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1475 NULL, sc->sc_dev.dv_xname, "txdrop");
1476
1477 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1478 NULL, sc->sc_dev.dv_xname, "tu");
1479
1480 evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
1481 NULL, sc->sc_dev.dv_xname, "tx_xoff");
1482 evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
1483 NULL, sc->sc_dev.dv_xname, "tx_xon");
1484 evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
1485 NULL, sc->sc_dev.dv_xname, "rx_xoff");
1486 evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
1487 NULL, sc->sc_dev.dv_xname, "rx_xon");
1488 evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
1489 NULL, sc->sc_dev.dv_xname, "rx_macctl");
1490 #endif /* WM_EVENT_COUNTERS */
1491
1492 /*
1493 * Make sure the interface is shutdown during reboot.
1494 */
1495 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1496 if (sc->sc_sdhook == NULL)
1497 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1498 sc->sc_dev.dv_xname);
1499
1500 sc->sc_powerhook = powerhook_establish(sc->sc_dev.dv_xname,
1501 wm_powerhook, sc);
1502 if (sc->sc_powerhook == NULL)
1503 aprint_error("%s: can't establish powerhook\n",
1504 sc->sc_dev.dv_xname);
1505 return;
1506
1507 /*
1508 * Free any resources we've allocated during the failed attach
1509 * attempt. Do this in reverse order and fall through.
1510 */
1511 fail_5:
1512 for (i = 0; i < WM_NRXDESC; i++) {
1513 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1514 bus_dmamap_destroy(sc->sc_dmat,
1515 sc->sc_rxsoft[i].rxs_dmamap);
1516 }
1517 fail_4:
1518 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1519 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1520 bus_dmamap_destroy(sc->sc_dmat,
1521 sc->sc_txsoft[i].txs_dmamap);
1522 }
1523 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1524 fail_3:
1525 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1526 fail_2:
1527 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1528 cdata_size);
1529 fail_1:
1530 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1531 fail_0:
1532 return;
1533 }
1534
1535 /*
1536 * wm_shutdown:
1537 *
1538 * Make sure the interface is stopped at reboot time.
1539 */
1540 static void
1541 wm_shutdown(void *arg)
1542 {
1543 struct wm_softc *sc = arg;
1544
1545 wm_stop(&sc->sc_ethercom.ec_if, 1);
1546 }
1547
1548 static void
1549 wm_powerhook(int why, void *arg)
1550 {
1551 struct wm_softc *sc = arg;
1552 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1553 pci_chipset_tag_t pc = sc->sc_pc;
1554 pcitag_t tag = sc->sc_pcitag;
1555
1556 switch (why) {
1557 case PWR_SOFTSUSPEND:
1558 wm_shutdown(sc);
1559 break;
1560 case PWR_SOFTRESUME:
1561 ifp->if_flags &= ~IFF_RUNNING;
1562 wm_init(ifp);
1563 if (ifp->if_flags & IFF_RUNNING)
1564 wm_start(ifp);
1565 break;
1566 case PWR_SUSPEND:
1567 pci_conf_capture(pc, tag, &sc->sc_pciconf);
1568 break;
1569 case PWR_RESUME:
1570 pci_conf_restore(pc, tag, &sc->sc_pciconf);
1571 break;
1572 }
1573
1574 return;
1575 }
1576
1577 /*
1578 * wm_tx_offload:
1579 *
1580 * Set up TCP/IP checksumming parameters for the
1581 * specified packet.
1582 */
1583 static int
1584 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1585 uint8_t *fieldsp)
1586 {
1587 struct mbuf *m0 = txs->txs_mbuf;
1588 struct livengood_tcpip_ctxdesc *t;
1589 uint32_t ipcs, tucs, cmd, cmdlen, seg;
1590 struct ether_header *eh;
1591 int offset, iphl;
1592 uint8_t fields;
1593
1594 /*
1595 * XXX It would be nice if the mbuf pkthdr had offset
1596 * fields for the protocol headers.
1597 */
1598
1599 eh = mtod(m0, struct ether_header *);
1600 switch (htons(eh->ether_type)) {
1601 case ETHERTYPE_IP:
1602 case ETHERTYPE_IPV6:
1603 offset = ETHER_HDR_LEN;
1604 break;
1605
1606 case ETHERTYPE_VLAN:
1607 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1608 break;
1609
1610 default:
1611 /*
1612 * Don't support this protocol or encapsulation.
1613 */
1614 *fieldsp = 0;
1615 *cmdp = 0;
1616 return (0);
1617 }
1618
1619 if ((m0->m_pkthdr.csum_flags &
1620 (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
1621 iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
1622 } else {
1623 iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
1624 }
1625
1626 cmd = WTX_CMD_DEXT | WTX_DTYP_D;
1627 cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
1628 seg = 0;
1629 fields = 0;
1630
1631 if (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) {
1632 int hlen = offset + iphl;
1633 WM_EVCNT_INCR(&sc->sc_ev_txtso);
1634 if (__predict_false(m0->m_len <
1635 (hlen + sizeof(struct tcphdr)))) {
1636 /*
1637 * TCP/IP headers are not in the first mbuf; we need
1638 * to do this the slow and painful way. Let's just
1639 * hope this doesn't happen very often.
1640 */
1641 struct ip ip;
1642 struct tcphdr th;
1643
1644 WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
1645
1646 m_copydata(m0, offset, sizeof(ip), &ip);
1647 m_copydata(m0, hlen, sizeof(th), &th);
1648
1649 ip.ip_len = 0;
1650
1651 m_copyback(m0, offset + offsetof(struct ip, ip_len),
1652 sizeof(ip.ip_len), &ip.ip_len);
1653
1654 th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
1655 ip.ip_dst.s_addr, htons(IPPROTO_TCP));
1656
1657 m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
1658 sizeof(th.th_sum), &th.th_sum);
1659
1660 hlen += th.th_off << 2;
1661 } else {
1662 /*
1663 * TCP/IP headers are in the first mbuf; we can do
1664 * this the easy way.
1665 */
1666 struct ip *ip =
1667 (struct ip *) (mtod(m0, caddr_t) + offset);
1668 struct tcphdr *th =
1669 (struct tcphdr *) (mtod(m0, caddr_t) + hlen);
1670
1671 ip->ip_len = 0;
1672 th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
1673 ip->ip_dst.s_addr, htons(IPPROTO_TCP));
1674
1675 hlen += th->th_off << 2;
1676 }
1677
1678 cmd |= WTX_TCPIP_CMD_TSE;
1679 cmdlen |= WTX_TCPIP_CMD_TSE | WTX_TCPIP_CMD_IP |
1680 WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
1681 seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
1682 WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
1683 }
1684
1685 /*
1686 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1687 * offload feature, if we load the context descriptor, we
1688 * MUST provide valid values for IPCSS and TUCSS fields.
1689 */
1690
1691 ipcs = WTX_TCPIP_IPCSS(offset) |
1692 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1693 WTX_TCPIP_IPCSE(offset + iphl - 1);
1694 if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
1695 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1696 fields |= WTX_IXSM;
1697 }
1698
1699 offset += iphl;
1700
1701 if (m0->m_pkthdr.csum_flags &
1702 (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
1703 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1704 fields |= WTX_TXSM;
1705 tucs = WTX_TCPIP_TUCSS(offset) |
1706 WTX_TCPIP_TUCSO(offset +
1707 M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
1708 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1709 } else if ((m0->m_pkthdr.csum_flags &
1710 (M_CSUM_TCPv6|M_CSUM_UDPv6)) != 0) {
1711 WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
1712 fields |= WTX_TXSM;
1713 tucs = WTX_TCPIP_TUCSS(offset) |
1714 WTX_TCPIP_TUCSO(offset +
1715 M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
1716 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1717 } else {
1718 /* Just initialize it to a valid TCP context. */
1719 tucs = WTX_TCPIP_TUCSS(offset) |
1720 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1721 WTX_TCPIP_TUCSE(0) /* rest of packet */;
1722 }
1723
1724 /* Fill in the context descriptor. */
1725 t = (struct livengood_tcpip_ctxdesc *)
1726 &sc->sc_txdescs[sc->sc_txnext];
1727 t->tcpip_ipcs = htole32(ipcs);
1728 t->tcpip_tucs = htole32(tucs);
1729 t->tcpip_cmdlen = htole32(cmdlen);
1730 t->tcpip_seg = htole32(seg);
1731 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1732
1733 sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
1734 txs->txs_ndesc++;
1735
1736 *cmdp = cmd;
1737 *fieldsp = fields;
1738
1739 return (0);
1740 }
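
/*
 * Worked example of the context fields above, for a plain (untagged)
 * IPv4/TCP packet with a 20-byte IP header: offset = ETHER_HDR_LEN =
 * 14, so IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum) = 24,
 * and IPCSE = 14 + 20 - 1 = 33; then TUCSS = 34 and TUCSO = 34 +
 * offsetof(struct tcphdr, th_sum) = 50, with TUCSE = 0 meaning
 * "through the end of the packet".
 */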
1741
1742 static void
1743 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
1744 {
1745 struct mbuf *m;
1746 int i;
1747
1748 log(LOG_DEBUG, "%s: mbuf chain:\n", sc->sc_dev.dv_xname);
1749 for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
1750 log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
1751 "m_flags = 0x%08x\n", sc->sc_dev.dv_xname,
1752 m->m_data, m->m_len, m->m_flags);
1753 log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", sc->sc_dev.dv_xname,
1754 i, i == 1 ? "" : "s");
1755 }
1756
1757 /*
1758 * wm_82547_txfifo_stall:
1759 *
1760 * Callout used to wait for the 82547 Tx FIFO to drain,
1761 * reset the FIFO pointers, and restart packet transmission.
1762 */
1763 static void
1764 wm_82547_txfifo_stall(void *arg)
1765 {
1766 struct wm_softc *sc = arg;
1767 int s;
1768
1769 s = splnet();
1770
1771 if (sc->sc_txfifo_stall) {
1772 if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
1773 CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
1774 CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
1775 /*
1776 * Packets have drained. Stop transmitter, reset
1777 * FIFO pointers, restart transmitter, and kick
1778 * the packet queue.
1779 */
1780 uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
1781 CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
1782 CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
1783 CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
1784 CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
1785 CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
1786 CSR_WRITE(sc, WMREG_TCTL, tctl);
1787 CSR_WRITE_FLUSH(sc);
1788
1789 sc->sc_txfifo_head = 0;
1790 sc->sc_txfifo_stall = 0;
1791 wm_start(&sc->sc_ethercom.ec_if);
1792 } else {
1793 /*
1794 * Still waiting for packets to drain; try again in
1795 * another tick.
1796 */
1797 callout_schedule(&sc->sc_txfifo_ch, 1);
1798 }
1799 }
1800
1801 splx(s);
1802 }
1803
1804 /*
1805 * wm_82547_txfifo_bugchk:
1806 *
1807 * Check for bug condition in the 82547 Tx FIFO. We need to
1808 * prevent enqueueing a packet that would wrap around the end
1809 * of the Tx FIFO ring buffer; otherwise the chip will croak.
1810 *
1811 * We do this by checking the amount of space before the end
1812 * of the Tx FIFO buffer. If the packet will not fit, we "stall"
1813 * the Tx FIFO, wait for all remaining packets to drain, reset
1814 * the internal FIFO pointers to the beginning, and restart
1815 * transmission on the interface.
1816 */
1817 #define WM_FIFO_HDR 0x10
1818 #define WM_82547_PAD_LEN 0x3e0
1819 static int
1820 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
1821 {
1822 int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
1823 int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
1824
1825 /* Just return if already stalled. */
1826 if (sc->sc_txfifo_stall)
1827 return (1);
1828
1829 if (sc->sc_mii.mii_media_active & IFM_FDX) {
1830 /* Stall only occurs in half-duplex mode. */
1831 goto send_packet;
1832 }
1833
1834 if (len >= WM_82547_PAD_LEN + space) {
1835 sc->sc_txfifo_stall = 1;
1836 callout_schedule(&sc->sc_txfifo_ch, 1);
1837 return (1);
1838 }
1839
1840 send_packet:
1841 sc->sc_txfifo_head += len;
1842 if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
1843 sc->sc_txfifo_head -= sc->sc_txfifo_size;
1844
1845 return (0);
1846 }
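
/*
 * Worked example of the accounting above (hypothetical numbers): with
 * sc_txfifo_size = 0x2800 and sc_txfifo_head = 0x2600, space is 0x200.
 * A 1000-byte packet rounds up to len = roundup(1000 + 0x10, 0x10) =
 * 0x400; since 0x400 < WM_82547_PAD_LEN + 0x200 = 0x5e0, the packet is
 * sent and the head pointer wraps: 0x2600 + 0x400 - 0x2800 = 0x200.
 */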
1847
1848 /*
1849 * wm_start: [ifnet interface function]
1850 *
1851 * Start packet transmission on the interface.
1852 */
1853 static void
1854 wm_start(struct ifnet *ifp)
1855 {
1856 struct wm_softc *sc = ifp->if_softc;
1857 struct mbuf *m0;
1858 #if 0 /* XXXJRT */
1859 struct m_tag *mtag;
1860 #endif
1861 struct wm_txsoft *txs;
1862 bus_dmamap_t dmamap;
1863 int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
1864 bus_addr_t curaddr;
1865 bus_size_t seglen, curlen;
1866 uint32_t cksumcmd;
1867 uint8_t cksumfields;
1868
1869 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1870 return;
1871
1872 /*
1873 * Remember the previous number of free descriptors.
1874 */
1875 ofree = sc->sc_txfree;
1876
1877 /*
1878 * Loop through the send queue, setting up transmit descriptors
1879 * until we drain the queue, or use up all available transmit
1880 * descriptors.
1881 */
1882 for (;;) {
1883 /* Grab a packet off the queue. */
1884 IFQ_POLL(&ifp->if_snd, m0);
1885 if (m0 == NULL)
1886 break;
1887
1888 DPRINTF(WM_DEBUG_TX,
1889 ("%s: TX: have packet to transmit: %p\n",
1890 sc->sc_dev.dv_xname, m0));
1891
1892 /* Get a work queue entry. */
1893 if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
1894 wm_txintr(sc);
1895 if (sc->sc_txsfree == 0) {
1896 DPRINTF(WM_DEBUG_TX,
1897 ("%s: TX: no free job descriptors\n",
1898 sc->sc_dev.dv_xname));
1899 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1900 break;
1901 }
1902 }
1903
1904 txs = &sc->sc_txsoft[sc->sc_txsnext];
1905 dmamap = txs->txs_dmamap;
1906
1907 use_tso = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
1908
1909 /*
1910 * So says the Linux driver:
1911 * The controller does a simple calculation to make sure
1912 * there is enough room in the FIFO before initiating the
1913 * DMA for each buffer. The calc is:
1914 * 4 = ceil(buffer len / MSS)
1915 * To make sure we don't overrun the FIFO, adjust the max
1916 * buffer len if the MSS drops.
1917 */
1918 dmamap->dm_maxsegsz =
1919 (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
1920 ? m0->m_pkthdr.segsz << 2
1921 : WTX_MAX_LEN;
1922
1923 /*
1924 * Load the DMA map. If this fails, the packet either
1925 * didn't fit in the allotted number of segments, or we
1926 * were short on resources. For the too-many-segments
1927 * case, we simply report an error and drop the packet,
1928 * since we can't sanely copy a jumbo packet to a single
1929 * buffer.
1930 */
1931 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1932 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1933 if (error) {
1934 if (error == EFBIG) {
1935 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1936 log(LOG_ERR, "%s: Tx packet consumes too many "
1937 "DMA segments, dropping...\n",
1938 sc->sc_dev.dv_xname);
1939 IFQ_DEQUEUE(&ifp->if_snd, m0);
1940 wm_dump_mbuf_chain(sc, m0);
1941 m_freem(m0);
1942 continue;
1943 }
1944 /*
1945 * Short on resources, just stop for now.
1946 */
1947 DPRINTF(WM_DEBUG_TX,
1948 ("%s: TX: dmamap load failed: %d\n",
1949 sc->sc_dev.dv_xname, error));
1950 break;
1951 }
1952
1953 segs_needed = dmamap->dm_nsegs;
1954 if (use_tso) {
1955 /* For sentinel descriptor; see below. */
1956 segs_needed++;
1957 }
1958
1959 /*
1960 * Ensure we have enough descriptors free to describe
1961 * the packet. Note, we always reserve one descriptor
1962 * at the end of the ring due to the semantics of the
1963 * TDT register, plus one more in the event we need
1964 * to load offload context.
1965 */
1966 if (segs_needed > sc->sc_txfree - 2) {
1967 /*
1968 * Not enough free descriptors to transmit this
1969 * packet. We haven't committed anything yet,
1970 * so just unload the DMA map, put the packet
1971 * back on the queue, and punt. Notify the upper
1972 * layer that there are no more slots left.
1973 */
1974 DPRINTF(WM_DEBUG_TX,
1975 ("%s: TX: need %d (%d) descriptors, have %d\n",
1976 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed,
1977 sc->sc_txfree - 1));
1978 ifp->if_flags |= IFF_OACTIVE;
1979 bus_dmamap_unload(sc->sc_dmat, dmamap);
1980 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1981 break;
1982 }
1983
1984 /*
1985 * Check for 82547 Tx FIFO bug. We need to do this
1986 * once we know we can transmit the packet, since we
1987 * do some internal FIFO space accounting here.
1988 */
1989 if (sc->sc_type == WM_T_82547 &&
1990 wm_82547_txfifo_bugchk(sc, m0)) {
1991 DPRINTF(WM_DEBUG_TX,
1992 ("%s: TX: 82547 Tx FIFO bug detected\n",
1993 sc->sc_dev.dv_xname));
1994 ifp->if_flags |= IFF_OACTIVE;
1995 bus_dmamap_unload(sc->sc_dmat, dmamap);
1996 WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
1997 break;
1998 }
1999
2000 IFQ_DEQUEUE(&ifp->if_snd, m0);
2001
2002 /*
2003 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2004 */
2005
2006 DPRINTF(WM_DEBUG_TX,
2007 ("%s: TX: packet has %d (%d) DMA segments\n",
2008 sc->sc_dev.dv_xname, dmamap->dm_nsegs, segs_needed));
2009
2010 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2011
2012 /*
2013 * Store a pointer to the packet so that we can free it
2014 * later.
2015 *
2016 * Initially, we consider the number of descriptors the
2017 * packet uses to be the number of DMA segments. This may be
2018 * incremented by 1 if we do checksum offload (a descriptor
2019 * is used to set the checksum context).
2020 */
2021 txs->txs_mbuf = m0;
2022 txs->txs_firstdesc = sc->sc_txnext;
2023 txs->txs_ndesc = segs_needed;
2024
2025 /* Set up offload parameters for this packet. */
2026 if (m0->m_pkthdr.csum_flags &
2027 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2028 M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2029 if (wm_tx_offload(sc, txs, &cksumcmd,
2030 &cksumfields) != 0) {
2031 /* Error message already displayed. */
2032 bus_dmamap_unload(sc->sc_dmat, dmamap);
2033 continue;
2034 }
2035 } else {
2036 cksumcmd = 0;
2037 cksumfields = 0;
2038 }
2039
2040 cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2041
2042 /* Sync the DMA map. */
2043 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2044 BUS_DMASYNC_PREWRITE);
2045
2046 /*
2047 * Initialize the transmit descriptor.
2048 */
2049 for (nexttx = sc->sc_txnext, seg = 0;
2050 seg < dmamap->dm_nsegs; seg++) {
2051 for (seglen = dmamap->dm_segs[seg].ds_len,
2052 curaddr = dmamap->dm_segs[seg].ds_addr;
2053 seglen != 0;
2054 curaddr += curlen, seglen -= curlen,
2055 nexttx = WM_NEXTTX(sc, nexttx)) {
2056 curlen = seglen;
2057
2058 /*
2059 * So says the Linux driver:
2060 * Work around for premature descriptor
2061 * write-backs in TSO mode. Append a
2062 * 4-byte sentinel descriptor.
2063 */
2064 if (use_tso &&
2065 seg == dmamap->dm_nsegs - 1 &&
2066 curlen > 8)
2067 curlen -= 4;
2068
2069 wm_set_dma_addr(
2070 &sc->sc_txdescs[nexttx].wtx_addr,
2071 curaddr);
2072 sc->sc_txdescs[nexttx].wtx_cmdlen =
2073 htole32(cksumcmd | curlen);
2074 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2075 0;
2076 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2077 cksumfields;
2078 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2079 lasttx = nexttx;
2080
2081 DPRINTF(WM_DEBUG_TX,
2082 ("%s: TX: desc %d: low 0x%08lx, "
2083 "len 0x%04x\n",
2084 sc->sc_dev.dv_xname, nexttx,
2085 curaddr & 0xffffffffUL, (unsigned)curlen));
2086 }
2087 }
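
/*
 * Note on the TSO workaround just above: trimming the final
 * segment by 4 bytes leaves 4 bytes of seglen for one more pass
 * through the inner loop, and that trailing 4-byte descriptor
 * is the sentinel accounted for in segs_needed earlier.
 */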
2088
2089 KASSERT(lasttx != -1);
2090
2091 /*
2092 * Set up the command byte on the last descriptor of
2093 * the packet. If we're in the interrupt delay window,
2094 * delay the interrupt.
2095 */
2096 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2097 htole32(WTX_CMD_EOP | WTX_CMD_RS);
2098
2099 #if 0 /* XXXJRT */
2100 /*
2101 * If VLANs are enabled and the packet has a VLAN tag, set
2102 * up the descriptor to encapsulate the packet for us.
2103 *
2104 * This is only valid on the last descriptor of the packet.
2105 */
2106 if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2107 sc->sc_txdescs[lasttx].wtx_cmdlen |=
2108 htole32(WTX_CMD_VLE);
2109 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2110 = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2111 }
2112 #endif /* XXXJRT */
2113
2114 txs->txs_lastdesc = lasttx;
2115
2116 DPRINTF(WM_DEBUG_TX,
2117 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
2118 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2119
2120 /* Sync the descriptors we're using. */
2121 WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2122 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2123
2124 /* Give the packet to the chip. */
2125 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2126
2127 DPRINTF(WM_DEBUG_TX,
2128 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
2129
2130 DPRINTF(WM_DEBUG_TX,
2131 ("%s: TX: finished transmitting packet, job %d\n",
2132 sc->sc_dev.dv_xname, sc->sc_txsnext));
2133
2134 /* Advance the tx pointer. */
2135 sc->sc_txfree -= txs->txs_ndesc;
2136 sc->sc_txnext = nexttx;
2137
2138 sc->sc_txsfree--;
2139 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2140
2141 #if NBPFILTER > 0
2142 /* Pass the packet to any BPF listeners. */
2143 if (ifp->if_bpf)
2144 bpf_mtap(ifp->if_bpf, m0);
2145 #endif /* NBPFILTER > 0 */
2146 }
2147
2148 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2149 /* No more slots; notify upper layer. */
2150 ifp->if_flags |= IFF_OACTIVE;
2151 }
2152
2153 if (sc->sc_txfree != ofree) {
2154 /* Set a watchdog timer in case the chip flakes out. */
2155 ifp->if_timer = 5;
2156 }
2157 }
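
/*
 * Restating the reservation wm_start() applies: one free descriptor is
 * always held back due to the TDT register semantics, and one more for
 * a possible checksum context descriptor.  An illustrative predicate
 * (hypothetical helper, not part of the driver):
 */
#if 0
static int
wm_tx_has_room(const struct wm_softc *sc, int segs_needed)
{
	/* E.g. with sc_txfree == 10, a packet may use at most 8. */
	return (segs_needed <= sc->sc_txfree - 2);
}
#endif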
2158
2159 /*
2160 * wm_watchdog: [ifnet interface function]
2161 *
2162 * Watchdog timer handler.
2163 */
2164 static void
2165 wm_watchdog(struct ifnet *ifp)
2166 {
2167 struct wm_softc *sc = ifp->if_softc;
2168
2169 /*
2170 * Since we're using delayed interrupts, sweep up
2171 * before we report an error.
2172 */
2173 wm_txintr(sc);
2174
2175 if (sc->sc_txfree != WM_NTXDESC(sc)) {
2176 log(LOG_ERR,
2177 "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2178 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
2179 sc->sc_txnext);
2180 ifp->if_oerrors++;
2181
2182 /* Reset the interface. */
2183 (void) wm_init(ifp);
2184 }
2185
2186 /* Try to get more packets going. */
2187 wm_start(ifp);
2188 }
2189
2190 /*
2191 * wm_ioctl: [ifnet interface function]
2192 *
2193 * Handle control requests from the operator.
2194 */
2195 static int
2196 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2197 {
2198 struct wm_softc *sc = ifp->if_softc;
2199 struct ifreq *ifr = (struct ifreq *) data;
2200 int s, error;
2201
2202 s = splnet();
2203
2204 switch (cmd) {
2205 case SIOCSIFMEDIA:
2206 case SIOCGIFMEDIA:
2207 /* Flow control requires full-duplex mode. */
2208 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2209 (ifr->ifr_media & IFM_FDX) == 0)
2210 ifr->ifr_media &= ~IFM_ETH_FMASK;
2211 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2212 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2213 /* We can do both TXPAUSE and RXPAUSE. */
2214 ifr->ifr_media |=
2215 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2216 }
2217 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2218 }
2219 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2220 break;
2221 default:
2222 error = ether_ioctl(ifp, cmd, data);
2223 if (error == ENETRESET) {
2224 /*
2225 * Multicast list has changed; set the hardware filter
2226 * accordingly.
2227 */
2228 if (ifp->if_flags & IFF_RUNNING)
2229 wm_set_filter(sc);
2230 error = 0;
2231 }
2232 break;
2233 }
2234
2235 /* Try to get more packets going. */
2236 wm_start(ifp);
2237
2238 splx(s);
2239 return (error);
2240 }
2241
2242 /*
2243 * wm_intr:
2244 *
2245 * Interrupt service routine.
2246 */
2247 static int
2248 wm_intr(void *arg)
2249 {
2250 struct wm_softc *sc = arg;
2251 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2252 uint32_t icr;
2253 int handled = 0;
2254
2255 while (1 /* CONSTCOND */) {
2256 icr = CSR_READ(sc, WMREG_ICR);
2257 if ((icr & sc->sc_icr) == 0)
2258 break;
2259 #if 0 /*NRND > 0*/
2260 if (RND_ENABLED(&sc->rnd_source))
2261 rnd_add_uint32(&sc->rnd_source, icr);
2262 #endif
2263
2264 handled = 1;
2265
2266 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2267 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
2268 DPRINTF(WM_DEBUG_RX,
2269 ("%s: RX: got Rx intr 0x%08x\n",
2270 sc->sc_dev.dv_xname,
2271 icr & (ICR_RXDMT0|ICR_RXT0)));
2272 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
2273 }
2274 #endif
2275 wm_rxintr(sc);
2276
2277 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
2278 if (icr & ICR_TXDW) {
2279 DPRINTF(WM_DEBUG_TX,
2280 ("%s: TX: got TXDW interrupt\n",
2281 sc->sc_dev.dv_xname));
2282 WM_EVCNT_INCR(&sc->sc_ev_txdw);
2283 }
2284 #endif
2285 wm_txintr(sc);
2286
2287 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
2288 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
2289 wm_linkintr(sc, icr);
2290 }
2291
2292 if (icr & ICR_RXO) {
2293 ifp->if_ierrors++;
2294 #if defined(WM_DEBUG)
2295 log(LOG_WARNING, "%s: Receive overrun\n",
2296 sc->sc_dev.dv_xname);
2297 #endif /* defined(WM_DEBUG) */
2298 }
2299 }
2300
2301 if (handled) {
2302 /* Try to get more packets going. */
2303 wm_start(ifp);
2304 }
2305
2306 return (handled);
2307 }
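
/*
 * Note: on the i8254x a read of ICR returns and clears the pending
 * cause bits, which is why the loop above simply re-reads ICR until
 * none of the causes we enabled (sc_icr) remain set.
 */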
2308
2309 /*
2310 * wm_txintr:
2311 *
2312 * Helper; handle transmit interrupts.
2313 */
2314 static void
2315 wm_txintr(struct wm_softc *sc)
2316 {
2317 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2318 struct wm_txsoft *txs;
2319 uint8_t status;
2320 int i;
2321
2322 ifp->if_flags &= ~IFF_OACTIVE;
2323
2324 /*
2325 * Go through the Tx list and free mbufs for those
2326 * frames which have been transmitted.
2327 */
2328 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
2329 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
2330 txs = &sc->sc_txsoft[i];
2331
2332 DPRINTF(WM_DEBUG_TX,
2333 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
2334
2335 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
2336 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2337
2338 status =
2339 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
2340 if ((status & WTX_ST_DD) == 0) {
2341 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
2342 BUS_DMASYNC_PREREAD);
2343 break;
2344 }
2345
2346 DPRINTF(WM_DEBUG_TX,
2347 ("%s: TX: job %d done: descs %d..%d\n",
2348 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
2349 txs->txs_lastdesc));
2350
2351 /*
2352 * XXX We should probably be using the statistics
2353 * XXX registers, but I don't know if they exist
2354 * XXX on chips before the i82544.
2355 */
2356
2357 #ifdef WM_EVENT_COUNTERS
2358 if (status & WTX_ST_TU)
2359 WM_EVCNT_INCR(&sc->sc_ev_tu);
2360 #endif /* WM_EVENT_COUNTERS */
2361
2362 if (status & (WTX_ST_EC|WTX_ST_LC)) {
2363 ifp->if_oerrors++;
2364 if (status & WTX_ST_LC)
2365 log(LOG_WARNING, "%s: late collision\n",
2366 sc->sc_dev.dv_xname);
2367 else if (status & WTX_ST_EC) {
2368 ifp->if_collisions += 16;
2369 log(LOG_WARNING, "%s: excessive collisions\n",
2370 sc->sc_dev.dv_xname);
2371 }
2372 } else
2373 ifp->if_opackets++;
2374
2375 sc->sc_txfree += txs->txs_ndesc;
2376 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
2377 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2378 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2379 m_freem(txs->txs_mbuf);
2380 txs->txs_mbuf = NULL;
2381 }
2382
2383 /* Update the dirty transmit buffer pointer. */
2384 sc->sc_txsdirty = i;
2385 DPRINTF(WM_DEBUG_TX,
2386 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
2387
2388 /*
2389 * If there are no more pending transmissions, cancel the watchdog
2390 * timer.
2391 */
2392 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
2393 ifp->if_timer = 0;
2394 }
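
/*
 * Only the last descriptor of each packet is queued with WTX_CMD_RS
 * (see wm_start()), so the DD (descriptor done) bit is checked on
 * txs_lastdesc alone; the sweep above stops at the first job whose
 * last descriptor has not yet been written back.
 */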
2395
2396 /*
2397 * wm_rxintr:
2398 *
2399 * Helper; handle receive interrupts.
2400 */
2401 static void
2402 wm_rxintr(struct wm_softc *sc)
2403 {
2404 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2405 struct wm_rxsoft *rxs;
2406 struct mbuf *m;
2407 int i, len;
2408 uint8_t status, errors;
2409
2410 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
2411 rxs = &sc->sc_rxsoft[i];
2412
2413 DPRINTF(WM_DEBUG_RX,
2414 ("%s: RX: checking descriptor %d\n",
2415 sc->sc_dev.dv_xname, i));
2416
2417 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2418
2419 status = sc->sc_rxdescs[i].wrx_status;
2420 errors = sc->sc_rxdescs[i].wrx_errors;
2421 len = le16toh(sc->sc_rxdescs[i].wrx_len);
2422
2423 if ((status & WRX_ST_DD) == 0) {
2424 /*
2425 * We have processed all of the receive descriptors.
2426 */
2427 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
2428 break;
2429 }
2430
2431 if (__predict_false(sc->sc_rxdiscard)) {
2432 DPRINTF(WM_DEBUG_RX,
2433 ("%s: RX: discarding contents of descriptor %d\n",
2434 sc->sc_dev.dv_xname, i));
2435 WM_INIT_RXDESC(sc, i);
2436 if (status & WRX_ST_EOP) {
2437 /* Reset our state. */
2438 DPRINTF(WM_DEBUG_RX,
2439 ("%s: RX: resetting rxdiscard -> 0\n",
2440 sc->sc_dev.dv_xname));
2441 sc->sc_rxdiscard = 0;
2442 }
2443 continue;
2444 }
2445
2446 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2447 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2448
2449 m = rxs->rxs_mbuf;
2450
2451 /*
2452 * Add a new receive buffer to the ring, unless of
2453 * course the length is zero. Treat the latter as a
2454 * failed mapping.
2455 */
2456 if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
2457 /*
2458 * Failed, throw away what we've done so
2459 * far, and discard the rest of the packet.
2460 */
2461 ifp->if_ierrors++;
2462 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2463 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2464 WM_INIT_RXDESC(sc, i);
2465 if ((status & WRX_ST_EOP) == 0)
2466 sc->sc_rxdiscard = 1;
2467 if (sc->sc_rxhead != NULL)
2468 m_freem(sc->sc_rxhead);
2469 WM_RXCHAIN_RESET(sc);
2470 DPRINTF(WM_DEBUG_RX,
2471 ("%s: RX: Rx buffer allocation failed, "
2472 "dropping packet%s\n", sc->sc_dev.dv_xname,
2473 sc->sc_rxdiscard ? " (discard)" : ""));
2474 continue;
2475 }
2476
2477 WM_RXCHAIN_LINK(sc, m);
2478
2479 m->m_len = len;
2480
2481 DPRINTF(WM_DEBUG_RX,
2482 ("%s: RX: buffer at %p len %d\n",
2483 sc->sc_dev.dv_xname, m->m_data, len));
2484
2485 /*
2486 * If this is not the end of the packet, keep
2487 * looking.
2488 */
2489 if ((status & WRX_ST_EOP) == 0) {
2490 sc->sc_rxlen += len;
2491 DPRINTF(WM_DEBUG_RX,
2492 ("%s: RX: not yet EOP, rxlen -> %d\n",
2493 sc->sc_dev.dv_xname, sc->sc_rxlen));
2494 continue;
2495 }
2496
2497 /*
2498 * Okay, we have the entire packet now. The chip is
2499 * configured to include the FCS (not all chips can
2500 * be configured to strip it), so we need to trim it.
2501 */
2502 m->m_len -= ETHER_CRC_LEN;
2503
2504 *sc->sc_rxtailp = NULL;
2505 len = m->m_len + sc->sc_rxlen;
2506 m = sc->sc_rxhead;
2507
2508 WM_RXCHAIN_RESET(sc);
2509
2510 DPRINTF(WM_DEBUG_RX,
2511 ("%s: RX: have entire packet, len -> %d\n",
2512 sc->sc_dev.dv_xname, len));
2513
2514 /*
2515 * If an error occurred, update stats and drop the packet.
2516 */
2517 if (errors &
2518 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2519 ifp->if_ierrors++;
2520 if (errors & WRX_ER_SE)
2521 log(LOG_WARNING, "%s: symbol error\n",
2522 sc->sc_dev.dv_xname);
2523 else if (errors & WRX_ER_SEQ)
2524 log(LOG_WARNING, "%s: receive sequence error\n",
2525 sc->sc_dev.dv_xname);
2526 else if (errors & WRX_ER_CE)
2527 log(LOG_WARNING, "%s: CRC error\n",
2528 sc->sc_dev.dv_xname);
2529 m_freem(m);
2530 continue;
2531 }
2532
2533 /*
2534 * No errors. Receive the packet.
2535 */
2536 m->m_pkthdr.rcvif = ifp;
2537 m->m_pkthdr.len = len;
2538
2539 #if 0 /* XXXJRT */
2540 /*
2541 * If VLANs are enabled, VLAN packets have been unwrapped
2542 * for us. Associate the tag with the packet.
2543 */
2544 if ((status & WRX_ST_VP) != 0) {
2545 VLAN_INPUT_TAG(ifp, m,
2546 le16toh(sc->sc_rxdescs[i].wrx_special),
2547 continue);
2548 }
2549 #endif /* XXXJRT */
2550
2551 /*
2552 * Set up checksum info for this packet.
2553 */
2554 if ((status & WRX_ST_IXSM) == 0) {
2555 if (status & WRX_ST_IPCS) {
2556 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2557 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2558 if (errors & WRX_ER_IPE)
2559 m->m_pkthdr.csum_flags |=
2560 M_CSUM_IPv4_BAD;
2561 }
2562 if (status & WRX_ST_TCPCS) {
2563 /*
2564 * Note: we don't know if this was TCP or UDP,
2565 * so we just set both bits, and expect the
2566 * upper layers to deal.
2567 */
2568 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2569 m->m_pkthdr.csum_flags |=
2570 M_CSUM_TCPv4 | M_CSUM_UDPv4 |
2571 M_CSUM_TCPv6 | M_CSUM_UDPv6;
2572 if (errors & WRX_ER_TCPE)
2573 m->m_pkthdr.csum_flags |=
2574 M_CSUM_TCP_UDP_BAD;
2575 }
2576 }
2577
2578 ifp->if_ipackets++;
2579
2580 #if NBPFILTER > 0
2581 /* Pass this up to any BPF listeners. */
2582 if (ifp->if_bpf)
2583 bpf_mtap(ifp->if_bpf, m);
2584 #endif /* NBPFILTER > 0 */
2585
2586 /* Pass it on. */
2587 (*ifp->if_input)(ifp, m);
2588 }
2589
2590 /* Update the receive pointer. */
2591 sc->sc_rxptr = i;
2592
2593 DPRINTF(WM_DEBUG_RX,
2594 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2595 }
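
/*
 * Worked example of the reassembly above (assuming 2048-byte receive
 * buffers): a 3000-byte frame (FCS included) arrives as two
 * descriptors, len 2048 without EOP (sc_rxlen becomes 2048) and then
 * len 952 with EOP.  The EOP buffer is trimmed by ETHER_CRC_LEN, so
 * the packet handed up is 2048 + 948 = 2996 bytes.
 */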
2596
2597 /*
2598 * wm_linkintr:
2599 *
2600 * Helper; handle link interrupts.
2601 */
2602 static void
2603 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2604 {
2605 uint32_t status;
2606
2607 /*
2608 * If we get a link status interrupt on a 1000BASE-T
2609 * device, just fall into the normal MII tick path.
2610 */
2611 if (sc->sc_flags & WM_F_HAS_MII) {
2612 if (icr & ICR_LSC) {
2613 DPRINTF(WM_DEBUG_LINK,
2614 ("%s: LINK: LSC -> mii_tick\n",
2615 sc->sc_dev.dv_xname));
2616 mii_tick(&sc->sc_mii);
2617 } else if (icr & ICR_RXSEQ) {
2618 DPRINTF(WM_DEBUG_LINK,
2619 ("%s: LINK Receive sequence error\n",
2620 sc->sc_dev.dv_xname));
2621 }
2622 return;
2623 }
2624
2625 /*
2626 * If we are now receiving /C/, check for link again in
2627 * a couple of link clock ticks.
2628 */
2629 if (icr & ICR_RXCFG) {
2630 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2631 sc->sc_dev.dv_xname));
2632 sc->sc_tbi_anstate = 2;
2633 }
2634
2635 if (icr & ICR_LSC) {
2636 status = CSR_READ(sc, WMREG_STATUS);
2637 if (status & STATUS_LU) {
2638 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2639 sc->sc_dev.dv_xname,
2640 (status & STATUS_FD) ? "FDX" : "HDX"));
2641 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2642 sc->sc_fcrtl &= ~FCRTL_XONE;
2643 if (status & STATUS_FD)
2644 sc->sc_tctl |=
2645 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2646 else
2647 sc->sc_tctl |=
2648 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2649 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2650 sc->sc_fcrtl |= FCRTL_XONE;
2651 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2652 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2653 WMREG_OLD_FCRTL : WMREG_FCRTL,
2654 sc->sc_fcrtl);
2655 sc->sc_tbi_linkup = 1;
2656 } else {
2657 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2658 sc->sc_dev.dv_xname));
2659 sc->sc_tbi_linkup = 0;
2660 }
2661 sc->sc_tbi_anstate = 2;
2662 wm_tbi_set_linkled(sc);
2663 } else if (icr & ICR_RXSEQ) {
2664 DPRINTF(WM_DEBUG_LINK,
2665 ("%s: LINK: Receive sequence error\n",
2666 sc->sc_dev.dv_xname));
2667 }
2668 }
2669
2670 /*
2671 * wm_tick:
2672 *
2673 * One second timer, used to check link status, sweep up
2674 * completed transmit jobs, etc.
2675 */
2676 static void
2677 wm_tick(void *arg)
2678 {
2679 struct wm_softc *sc = arg;
2680 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2681 int s;
2682
2683 s = splnet();
2684
2685 if (sc->sc_type >= WM_T_82542_2_1) {
2686 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2687 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2688 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2689 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2690 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2691 }
2692
2693 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2694 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2695
2696
2697 if (sc->sc_flags & WM_F_HAS_MII)
2698 mii_tick(&sc->sc_mii);
2699 else
2700 wm_tbi_check_link(sc);
2701
2702 splx(s);
2703
2704 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2705 }
2706
2707 /*
2708 * wm_reset:
2709 *
2710 * Reset the i82542 chip.
2711 */
2712 static void
2713 wm_reset(struct wm_softc *sc)
2714 {
2715 int i;
2716
2717 /*
2718 * Allocate on-chip memory according to the MTU size.
2719 * The Packet Buffer Allocation register must be written
2720 * before the chip is reset.
2721 */
2722 switch (sc->sc_type) {
2723 case WM_T_82547:
2724 case WM_T_82547_2:
2725 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2726 PBA_22K : PBA_30K;
2727 sc->sc_txfifo_head = 0;
2728 sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
2729 sc->sc_txfifo_size =
2730 (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
2731 sc->sc_txfifo_stall = 0;
2732 break;
2733 case WM_T_82571:
2734 case WM_T_82572:
2735 case WM_T_80003:
2736 sc->sc_pba = PBA_32K;
2737 break;
2738 case WM_T_82573:
2739 sc->sc_pba = PBA_12K;
2740 break;
2741 default:
2742 sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
2743 PBA_40K : PBA_48K;
2744 break;
2745 }
2746 CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
2747
2748 switch (sc->sc_type) {
2749 case WM_T_82544:
2750 case WM_T_82540:
2751 case WM_T_82545:
2752 case WM_T_82546:
2753 case WM_T_82541:
2754 case WM_T_82541_2:
2755 /*
2756 * On some chipsets, a reset through a memory-mapped write
2757 * cycle can cause the chip to reset before completing the
2758 * write cycle. This causes a major headache that can be
2759 * avoided by issuing the reset via indirect register writes
2760 * through I/O space.
2761 *
2762 * So, if we successfully mapped the I/O BAR at attach time,
2763 * use that. Otherwise, try our luck with a memory-mapped
2764 * reset.
2765 */
2766 if (sc->sc_flags & WM_F_IOH_VALID)
2767 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2768 else
2769 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2770 break;
2771
2772 case WM_T_82545_3:
2773 case WM_T_82546_3:
2774 /* Use the shadow control register on these chips. */
2775 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2776 break;
2777
2778 default:
2779 /* Everything else can safely use the documented method. */
2780 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2781 break;
2782 }
2783 delay(10000);
2784
2785 for (i = 0; i < 1000; i++) {
2786 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2787 return;
2788 delay(20);
2789 }
2790
2791 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2792 log(LOG_ERR, "%s: reset failed to complete\n",
2793 sc->sc_dev.dv_xname);
2794
2795 if (sc->sc_type == WM_T_80003) {
2796 /* wait for eeprom to reload */
2797 for (i = 1000; i > 0; i--) {
2798 if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
2799 break;
2800 }
2801 if (i == 0) {
2802 log(LOG_ERR, "%s: auto read from eeprom failed to "
2803 "complete\n", sc->sc_dev.dv_xname);
2804 }
2805 }
2806 }
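
/*
 * Worked example of the 82547 packet-buffer split above (assuming the
 * PBA_* constants count kilobytes): at standard MTU, sc_pba = PBA_30K,
 * giving the receive side 30KB and the Tx FIFO the remaining
 * (40 - 30)KB = 10KB starting at the 30KB offset; those are the base
 * and size that wm_82547_txfifo_bugchk() accounts against.
 */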
2807
2808 /*
2809 * wm_init: [ifnet interface function]
2810 *
2811 * Initialize the interface. Must be called at splnet().
2812 */
2813 static int
2814 wm_init(struct ifnet *ifp)
2815 {
2816 struct wm_softc *sc = ifp->if_softc;
2817 struct wm_rxsoft *rxs;
2818 int i, error = 0;
2819 uint32_t reg;
2820
2821 /*
2822 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2823 * There is a small but measurable benefit to avoiding the adjustment
2824 * of the descriptor so that the headers are aligned, for normal mtu,
2825 * on such platforms. One possibility is that the DMA itself is
2826 * slightly more efficient if the front of the entire packet (instead
2827 * of the front of the headers) is aligned.
2828 *
2829 * Note we must always set align_tweak to 0 if we are using
2830 * jumbo frames.
2831 */
2832 #ifdef __NO_STRICT_ALIGNMENT
2833 sc->sc_align_tweak = 0;
2834 #else
2835 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2836 sc->sc_align_tweak = 0;
2837 else
2838 sc->sc_align_tweak = 2;
2839 #endif /* __NO_STRICT_ALIGNMENT */
2840
2841 /* Cancel any pending I/O. */
2842 wm_stop(ifp, 0);
2843
2844 /* update statistics before reset */
2845 ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2846 ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
2847
2848 /* Reset the chip to a known state. */
2849 wm_reset(sc);
2850
2851 /* Initialize the transmit descriptor ring. */
2852 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
2853 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
2854 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2855 sc->sc_txfree = WM_NTXDESC(sc);
2856 sc->sc_txnext = 0;
2857
2858 if (sc->sc_type < WM_T_82543) {
2859 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2860 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2861 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
2862 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2863 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2864 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2865 } else {
2866 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2867 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2868 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
2869 CSR_WRITE(sc, WMREG_TDH, 0);
2870 CSR_WRITE(sc, WMREG_TDT, 0);
2871 CSR_WRITE(sc, WMREG_TIDV, 64);
2872 CSR_WRITE(sc, WMREG_TADV, 128);
2873
2874 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2875 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2876 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2877 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2878 }
2879 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2880 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2881
2882 /* Initialize the transmit job descriptors. */
2883 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
2884 sc->sc_txsoft[i].txs_mbuf = NULL;
2885 sc->sc_txsfree = WM_TXQUEUELEN(sc);
2886 sc->sc_txsnext = 0;
2887 sc->sc_txsdirty = 0;
2888
2889 /*
2890 * Initialize the receive descriptor and receive job
2891 * descriptor rings.
2892 */
2893 if (sc->sc_type < WM_T_82543) {
2894 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
2895 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
2896 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2897 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2898 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2899 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2900
2901 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2902 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2903 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2904 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2905 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2906 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2907 } else {
2908 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
2909 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
2910 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2911 CSR_WRITE(sc, WMREG_RDH, 0);
2912 CSR_WRITE(sc, WMREG_RDT, 0);
2913 CSR_WRITE(sc, WMREG_RDTR, 0 | RDTR_FPD);
2914 CSR_WRITE(sc, WMREG_RADV, 128);
2915 }
2916 for (i = 0; i < WM_NRXDESC; i++) {
2917 rxs = &sc->sc_rxsoft[i];
2918 if (rxs->rxs_mbuf == NULL) {
2919 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2920 log(LOG_ERR, "%s: unable to allocate or map rx "
2921 "buffer %d, error = %d\n",
2922 sc->sc_dev.dv_xname, i, error);
2923 /*
2924 * XXX Should attempt to run with fewer receive
2925 * XXX buffers instead of just failing.
2926 */
2927 wm_rxdrain(sc);
2928 goto out;
2929 }
2930 } else
2931 WM_INIT_RXDESC(sc, i);
2932 }
2933 sc->sc_rxptr = 0;
2934 sc->sc_rxdiscard = 0;
2935 WM_RXCHAIN_RESET(sc);
2936
2937 /*
2938 * Clear out the VLAN table -- we don't use it (yet).
2939 */
2940 CSR_WRITE(sc, WMREG_VET, 0);
2941 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2942 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2943
2944 /*
2945 * Set up flow-control parameters.
2946 *
2947 * XXX Values could probably stand some tuning.
2948 */
2949 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2950 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2951 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2952
2953 sc->sc_fcrtl = FCRTL_DFLT;
2954 if (sc->sc_type < WM_T_82543) {
2955 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2956 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2957 } else {
2958 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2959 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2960 }
2961 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2962
2963 #if 0 /* XXXJRT */
2964 /* Deal with VLAN enables. */
2965 if (VLAN_ATTACHED(&sc->sc_ethercom))
2966 sc->sc_ctrl |= CTRL_VME;
2967 else
2968 #endif /* XXXJRT */
2969 sc->sc_ctrl &= ~CTRL_VME;
2970
2971 /* Write the control registers. */
2972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2973 if (sc->sc_type >= WM_T_80003 && (sc->sc_flags & WM_F_HAS_MII)) {
2974 int val;
2975 val = CSR_READ(sc, WMREG_CTRL_EXT);
2976 val &= ~CTRL_EXT_LINK_MODE_MASK;
2977 CSR_WRITE(sc, WMREG_CTRL_EXT, val);
2978
2979 /* Bypass RX and TX FIFO's */
2980 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
2981 KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
2982 KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
2983
2984 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
2985 KUMCTRLSTA_INB_CTRL_DIS_PADDING |
2986 KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
2987 /*
2988 * Set the MAC to wait the maximum time between each
2989 * iteration and increase the max iterations when
2990 * polling the phy; this fixes erroneous timeouts at 10Mbps.
2991 */
2992 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS, 0xFFFF);
2993 val = wm_kmrn_i80003_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
2994 val |= 0x3F;
2995 wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM, val);
2996 }
2997 #if 0
2998 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2999 #endif
3000
3001 /*
3002 * Set up checksum offload parameters.
3003 */
3004 reg = CSR_READ(sc, WMREG_RXCSUM);
3005 reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
3006 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3007 reg |= RXCSUM_IPOFL;
3008 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
3009 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
3010 if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3011 reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
3012 CSR_WRITE(sc, WMREG_RXCSUM, reg);
3013
3014 /*
3015 * Set up the interrupt registers.
3016 */
3017 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3018 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
3019 ICR_RXO | ICR_RXT0;
3020 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
3021 sc->sc_icr |= ICR_RXCFG;
3022 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
3023
3024 /* Set up the inter-packet gap. */
3025 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
3026
3027 if (sc->sc_type >= WM_T_82543) {
3028 /* Set up the interrupt throttling register (units of 256ns) */
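/*
 * 10^9 / (7000 * 256) = 558, i.e. an interval of about
 * 558 * 256ns = 143us, capping the rate near 7000
 * interrupts per second.
 */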
3029 sc->sc_itr = 1000000000 / (7000 * 256);
3030 CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
3031 }
3032
3033 #if 0 /* XXXJRT */
3034 /* Set the VLAN ethernetype. */
3035 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
3036 #endif
3037
3038 /*
3039 * Set up the transmit control register; we start out with
3040 * a collision distance suitable for FDX, but update it when
3041 * we resolve the media type.
3042 */
3043 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
3044 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3045 if (sc->sc_type >= WM_T_82571)
3046 sc->sc_tctl |= TCTL_MULR;
3047 if (sc->sc_type >= WM_T_80003)
3048 sc->sc_tctl |= TCTL_RTLC;
3049 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3050
3051 /* Set the media. */
3052 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
3053
3054 /*
3055 * Set up the receive control register; we actually program
3056 * the register when we set the receive filter. Use multicast
3057 * address offset type 0.
3058 *
3059 * Only the i82544 has the ability to strip the incoming
3060 * CRC, so we don't enable that feature.
3061 */
3062 sc->sc_mchash_type = 0;
3063 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
3064 | RCTL_MO(sc->sc_mchash_type);
3065
3066 /* 82573 doesn't support jumbo frames */
3067 if (sc->sc_type != WM_T_82573)
3068 sc->sc_rctl |= RCTL_LPE;
3069
3070 if (MCLBYTES == 2048) {
3071 sc->sc_rctl |= RCTL_2k;
3072 } else {
3073 if (sc->sc_type >= WM_T_82543) {
3074 switch (MCLBYTES) {
3075 case 4096:
3076 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
3077 break;
3078 case 8192:
3079 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
3080 break;
3081 case 16384:
3082 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
3083 break;
3084 default:
3085 panic("wm_init: MCLBYTES %d unsupported",
3086 MCLBYTES);
3087 break;
3088 }
3089 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
3090 }
3091
3092 /* Set the receive filter. */
3093 wm_set_filter(sc);
3094
3095 /* Start the one second link check clock. */
3096 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3097
3098 /* ...all done! */
3099 ifp->if_flags |= IFF_RUNNING;
3100 ifp->if_flags &= ~IFF_OACTIVE;
3101
3102 out:
3103 if (error)
3104 log(LOG_ERR, "%s: interface not running\n",
3105 sc->sc_dev.dv_xname);
3106 return (error);
3107 }
3108
3109 /*
3110 * wm_rxdrain:
3111 *
3112 * Drain the receive queue.
3113 */
3114 static void
3115 wm_rxdrain(struct wm_softc *sc)
3116 {
3117 struct wm_rxsoft *rxs;
3118 int i;
3119
3120 for (i = 0; i < WM_NRXDESC; i++) {
3121 rxs = &sc->sc_rxsoft[i];
3122 if (rxs->rxs_mbuf != NULL) {
3123 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3124 m_freem(rxs->rxs_mbuf);
3125 rxs->rxs_mbuf = NULL;
3126 }
3127 }
3128 }
3129
3130 /*
3131 * wm_stop: [ifnet interface function]
3132 *
3133 * Stop transmission on the interface.
3134 */
3135 static void
3136 wm_stop(struct ifnet *ifp, int disable)
3137 {
3138 struct wm_softc *sc = ifp->if_softc;
3139 struct wm_txsoft *txs;
3140 int i;
3141
3142 /* Stop the one second clock. */
3143 callout_stop(&sc->sc_tick_ch);
3144
3145 /* Stop the 82547 Tx FIFO stall check timer. */
3146 if (sc->sc_type == WM_T_82547)
3147 callout_stop(&sc->sc_txfifo_ch);
3148
3149 if (sc->sc_flags & WM_F_HAS_MII) {
3150 /* Down the MII. */
3151 mii_down(&sc->sc_mii);
3152 }
3153
3154 /* Stop the transmit and receive processes. */
3155 CSR_WRITE(sc, WMREG_TCTL, 0);
3156 CSR_WRITE(sc, WMREG_RCTL, 0);
3157
3158 /*
3159 * Clear the interrupt mask to ensure the device cannot assert its
3160 * interrupt line.
3161 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
3162 * any currently pending or shared interrupt.
3163 */
3164 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3165 sc->sc_icr = 0;
3166
3167 /* Release any queued transmit buffers. */
3168 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
3169 txs = &sc->sc_txsoft[i];
3170 if (txs->txs_mbuf != NULL) {
3171 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3172 m_freem(txs->txs_mbuf);
3173 txs->txs_mbuf = NULL;
3174 }
3175 }
3176
3177 if (disable)
3178 wm_rxdrain(sc);
3179
3180 /* Mark the interface as down and cancel the watchdog timer. */
3181 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
3182 ifp->if_timer = 0;
3183 }
3184
3185 /*
3186 * wm_acquire_eeprom:
3187 *
3188 * Perform the EEPROM handshake required on some chips.
3189 */
3190 static int
3191 wm_acquire_eeprom(struct wm_softc *sc)
3192 {
3193 uint32_t reg;
3194 int x;
3195 int ret = 0;
3196
3197 /* always succeeds */
3198 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3199 return 0;
3200
3201 if (sc->sc_flags & WM_F_SWFW_SYNC) {
3202 /* this will also do wm_get_swsm_semaphore() if needed */
3203 ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
3204 } else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
3205 ret = wm_get_swsm_semaphore(sc);
3206 }
3207
3208 if (ret)
3209 return 1;
3210
3211 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3212 reg = CSR_READ(sc, WMREG_EECD);
3213
3214 /* Request EEPROM access. */
3215 reg |= EECD_EE_REQ;
3216 CSR_WRITE(sc, WMREG_EECD, reg);
3217
3218 /* ..and wait for it to be granted. */
3219 for (x = 0; x < 1000; x++) {
3220 reg = CSR_READ(sc, WMREG_EECD);
3221 if (reg & EECD_EE_GNT)
3222 break;
3223 delay(5);
3224 }
3225 if ((reg & EECD_EE_GNT) == 0) {
3226 aprint_error("%s: could not acquire EEPROM GNT\n",
3227 sc->sc_dev.dv_xname);
3228 reg &= ~EECD_EE_REQ;
3229 CSR_WRITE(sc, WMREG_EECD, reg);
3230 if (sc->sc_flags & WM_F_SWFW_SYNC)
3231 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3232 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3233 wm_put_swsm_semaphore(sc);
3234 return (1);
3235 }
3236 }
3237
3238 return (0);
3239 }
3240
3241 /*
3242 * wm_release_eeprom:
3243 *
3244 * Release the EEPROM mutex.
3245 */
3246 static void
3247 wm_release_eeprom(struct wm_softc *sc)
3248 {
3249 uint32_t reg;
3250
3251 /* always succeeds */
3252 if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
3253 return;
3254
3255 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
3256 reg = CSR_READ(sc, WMREG_EECD);
3257 reg &= ~EECD_EE_REQ;
3258 CSR_WRITE(sc, WMREG_EECD, reg);
3259 }
3260
3261 if (sc->sc_flags & WM_F_SWFW_SYNC)
3262 wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
3263 else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
3264 wm_put_swsm_semaphore(sc);
3265 }
3266
3267 /*
3268 * wm_eeprom_sendbits:
3269 *
3270 * Send a series of bits to the EEPROM.
3271 */
3272 static void
3273 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
3274 {
3275 uint32_t reg;
3276 int x;
3277
3278 reg = CSR_READ(sc, WMREG_EECD);
3279
3280 for (x = nbits; x > 0; x--) {
3281 if (bits & (1U << (x - 1)))
3282 reg |= EECD_DI;
3283 else
3284 reg &= ~EECD_DI;
3285 CSR_WRITE(sc, WMREG_EECD, reg);
3286 delay(2);
3287 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3288 delay(2);
3289 CSR_WRITE(sc, WMREG_EECD, reg);
3290 delay(2);
3291 }
3292 }
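
/*
 * Worked example of the MSB-first clocking above: assuming
 * UWIRE_OPC_READ is the standard MicroWire READ opcode (110 binary),
 * wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) presents DI = 1, 1, 0 on
 * successive SK pulses.
 */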
3293
3294 /*
3295 * wm_eeprom_recvbits:
3296 *
3297 * Receive a series of bits from the EEPROM.
3298 */
3299 static void
3300 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
3301 {
3302 uint32_t reg, val;
3303 int x;
3304
3305 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
3306
3307 val = 0;
3308 for (x = nbits; x > 0; x--) {
3309 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
3310 delay(2);
3311 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
3312 val |= (1U << (x - 1));
3313 CSR_WRITE(sc, WMREG_EECD, reg);
3314 delay(2);
3315 }
3316 *valp = val;
3317 }
3318
3319 /*
3320 * wm_read_eeprom_uwire:
3321 *
3322 * Read a word from the EEPROM using the MicroWire protocol.
3323 */
3324 static int
3325 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3326 {
3327 uint32_t reg, val;
3328 int i;
3329
3330 for (i = 0; i < wordcnt; i++) {
3331 /* Clear SK and DI. */
3332 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
3333 CSR_WRITE(sc, WMREG_EECD, reg);
3334
3335 /* Set CHIP SELECT. */
3336 reg |= EECD_CS;
3337 CSR_WRITE(sc, WMREG_EECD, reg);
3338 delay(2);
3339
3340 /* Shift in the READ command. */
3341 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
3342
3343 /* Shift in address. */
3344 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
3345
3346 /* Shift out the data. */
3347 wm_eeprom_recvbits(sc, &val, 16);
3348 data[i] = val & 0xffff;
3349
3350 /* Clear CHIP SELECT. */
3351 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
3352 CSR_WRITE(sc, WMREG_EECD, reg);
3353 delay(2);
3354 }
3355
3356 return (0);
3357 }
3358
3359 /*
3360 * wm_spi_eeprom_ready:
3361 *
3362 * Wait for a SPI EEPROM to be ready for commands.
3363 */
3364 static int
3365 wm_spi_eeprom_ready(struct wm_softc *sc)
3366 {
3367 uint32_t val;
3368 int usec;
3369
3370 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
3371 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
3372 wm_eeprom_recvbits(sc, &val, 8);
3373 if ((val & SPI_SR_RDY) == 0)
3374 break;
3375 }
3376 if (usec >= SPI_MAX_RETRIES) {
3377 aprint_error("%s: EEPROM failed to become ready\n",
3378 sc->sc_dev.dv_xname);
3379 return (1);
3380 }
3381 return (0);
3382 }
3383
3384 /*
3385 * wm_read_eeprom_spi:
3386 *
3387 * Read a word from the EEPROM using the SPI protocol.
3388 */
3389 static int
3390 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3391 {
3392 uint32_t reg, val;
3393 int i;
3394 uint8_t opc;
3395
3396 /* Clear SK and CS. */
3397 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
3398 CSR_WRITE(sc, WMREG_EECD, reg);
3399 delay(2);
3400
3401 if (wm_spi_eeprom_ready(sc))
3402 return (1);
3403
3404 /* Toggle CS to flush commands. */
3405 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
3406 delay(2);
3407 CSR_WRITE(sc, WMREG_EECD, reg);
3408 delay(2);
3409
3410 opc = SPI_OPC_READ;
3411 if (sc->sc_ee_addrbits == 8 && word >= 128)
3412 opc |= SPI_OPC_A8;
3413
3414 wm_eeprom_sendbits(sc, opc, 8);
3415 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
3416
3417 for (i = 0; i < wordcnt; i++) {
3418 wm_eeprom_recvbits(sc, &val, 16);
3419 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
3420 }
3421
3422 /* Raise CS and clear SK. */
3423 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
3424 CSR_WRITE(sc, WMREG_EECD, reg);
3425 delay(2);
3426
3427 return (0);
3428 }
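
/*
 * The swap in the loop above puts the first byte the part shifts out
 * into the low byte of the word: if the EEPROM returns 0xAB and then
 * 0xCD, recvbits yields val = 0xABCD and data[i] becomes 0xCDAB.
 */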
3429
3430 #define EEPROM_CHECKSUM 0xBABA
3431 #define EEPROM_SIZE 0x0040
3432
3433 /*
3434 * wm_validate_eeprom_checksum
3435 *
3436 * The checksum is defined as the sum of the first 64 (16 bit) words.
3437 */
3438 static int
3439 wm_validate_eeprom_checksum(struct wm_softc *sc)
3440 {
3441 uint16_t checksum;
3442 uint16_t eeprom_data;
3443 int i;
3444
3445 checksum = 0;
3446
3447 for (i = 0; i < EEPROM_SIZE; i++) {
3448 if (wm_read_eeprom(sc, i, 1, &eeprom_data))
3449 return 1;
3450 checksum += eeprom_data;
3451 }
3452
3453 if (checksum != (uint16_t) EEPROM_CHECKSUM)
3454 return 1;
3455
3456 return 0;
3457 }
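
/*
 * The final EEPROM word is chosen by the vendor so that the mod-2^16
 * sum of all EEPROM_SIZE words equals EEPROM_CHECKSUM.  An equivalent
 * standalone check over an in-memory copy (hypothetical helper, not
 * part of the driver):
 */
#if 0
static int
wm_eeprom_sum_ok(const uint16_t *words)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < EEPROM_SIZE; i++)
		sum += words[i];	/* wraps modulo 2^16 */
	return (sum == EEPROM_CHECKSUM);
}
#endif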
3458
3459 /*
3460 * wm_read_eeprom:
3461 *
3462 * Read data from the serial EEPROM.
3463 */
3464 static int
3465 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
3466 {
3467 int rv;
3468
3469 if (sc->sc_flags & WM_F_EEPROM_INVALID)
3470 return 1;
3471
3472 if (wm_acquire_eeprom(sc))
3473 return 1;
3474
3475 if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
3476 rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
3477 else if (sc->sc_flags & WM_F_EEPROM_SPI)
3478 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
3479 else
3480 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
3481
3482 wm_release_eeprom(sc);
3483 return rv;
3484 }
3485
3486 static int
3487 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
3488 uint16_t *data)
3489 {
3490 int i, eerd = 0;
3491 int error = 0;
3492
3493 for (i = 0; i < wordcnt; i++) {
3494 eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
3495
3496 CSR_WRITE(sc, WMREG_EERD, eerd);
3497 error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
3498 if (error != 0)
3499 break;
3500
3501 data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
3502 }
3503
3504 return error;
3505 }
3506
3507 static int
3508 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
3509 {
3510 uint32_t attempts = 100000;
3511 uint32_t i, reg = 0;
3512 int32_t done = -1;
3513
3514 for (i = 0; i < attempts; i++) {
3515 reg = CSR_READ(sc, rw);
3516
3517 if (reg & EERD_DONE) {
3518 done = 0;
3519 break;
3520 }
3521 delay(5);
3522 }
3523
3524 return done;
3525 }
3526
3527 /*
3528 * wm_add_rxbuf:
3529 *
3530 * Add a receive buffer to the indicated descriptor.
3531 */
3532 static int
3533 wm_add_rxbuf(struct wm_softc *sc, int idx)
3534 {
3535 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
3536 struct mbuf *m;
3537 int error;
3538
3539 MGETHDR(m, M_DONTWAIT, MT_DATA);
3540 if (m == NULL)
3541 return (ENOBUFS);
3542
3543 MCLGET(m, M_DONTWAIT);
3544 if ((m->m_flags & M_EXT) == 0) {
3545 m_freem(m);
3546 return (ENOBUFS);
3547 }
3548
3549 if (rxs->rxs_mbuf != NULL)
3550 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3551
3552 rxs->rxs_mbuf = m;
3553
3554 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3555 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3556 BUS_DMA_READ|BUS_DMA_NOWAIT);
3557 if (error) {
3558 /* XXX XXX XXX */
3559 printf("%s: unable to load rx DMA map %d, error = %d\n",
3560 sc->sc_dev.dv_xname, idx, error);
3561 panic("wm_add_rxbuf");
3562 }
3563
3564 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3565 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3566
3567 WM_INIT_RXDESC(sc, idx);
3568
3569 return (0);
3570 }
3571
3572 /*
3573 * wm_set_ral:
3574 *
3575 * Set an entry in the receive address list.
3576 */
3577 static void
3578 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3579 {
3580 uint32_t ral_lo, ral_hi;
3581
3582 if (enaddr != NULL) {
3583 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3584 (enaddr[3] << 24);
3585 ral_hi = enaddr[4] | (enaddr[5] << 8);
3586 ral_hi |= RAL_AV;
3587 } else {
3588 ral_lo = 0;
3589 ral_hi = 0;
3590 }
3591
3592 if (sc->sc_type >= WM_T_82544) {
3593 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3594 ral_lo);
3595 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3596 ral_hi);
3597 } else {
3598 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3599 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3600 }
3601 }
3602
3603 /*
3604 * wm_mchash:
3605 *
3606 * Compute the hash of the multicast address for the 4096-bit
3607 * multicast filter.
3608 */
3609 static uint32_t
3610 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3611 {
3612 static const int lo_shift[4] = { 4, 3, 2, 0 };
3613 static const int hi_shift[4] = { 4, 5, 6, 8 };
3614 uint32_t hash;
3615
3616 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3617 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3618
3619 return (hash & 0xfff);
3620 }
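
/*
 * Worked example (with sc_mchash_type = 0, as wm_init() leaves it):
 * for a multicast address whose last two octets are enaddr[4] = 0x5e
 * and enaddr[5] = 0x01, hash = (0x5e >> 4) | (0x01 << 8) = 0x105.
 */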
3621
3622 /*
3623 * wm_set_filter:
3624 *
3625 * Set up the receive filter.
3626 */
3627 static void
3628 wm_set_filter(struct wm_softc *sc)
3629 {
3630 struct ethercom *ec = &sc->sc_ethercom;
3631 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3632 struct ether_multi *enm;
3633 struct ether_multistep step;
3634 bus_addr_t mta_reg;
3635 uint32_t hash, reg, bit;
3636 int i;
3637
3638 if (sc->sc_type >= WM_T_82544)
3639 mta_reg = WMREG_CORDOVA_MTA;
3640 else
3641 mta_reg = WMREG_MTA;
3642
3643 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3644
3645 if (ifp->if_flags & IFF_BROADCAST)
3646 sc->sc_rctl |= RCTL_BAM;
3647 if (ifp->if_flags & IFF_PROMISC) {
3648 sc->sc_rctl |= RCTL_UPE;
3649 goto allmulti;
3650 }
3651
3652 /*
3653 * Set the station address in the first RAL slot, and
3654 * clear the remaining slots.
3655 */
3656 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
3657 for (i = 1; i < WM_RAL_TABSIZE; i++)
3658 wm_set_ral(sc, NULL, i);
3659
3660 /* Clear out the multicast table. */
3661 for (i = 0; i < WM_MC_TABSIZE; i++)
3662 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3663
3664 ETHER_FIRST_MULTI(step, ec, enm);
3665 while (enm != NULL) {
3666 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3667 /*
3668 * We must listen to a range of multicast addresses.
3669 * For now, just accept all multicasts, rather than
3670 * trying to set only those filter bits needed to match
3671 * the range. (At this time, the only use of address
3672 * ranges is for IP multicast routing, for which the
3673 * range is big enough to require all bits set.)
3674 */
3675 goto allmulti;
3676 }
3677
3678 hash = wm_mchash(sc, enm->enm_addrlo);
3679
3680 reg = (hash >> 5) & 0x7f;
3681 bit = hash & 0x1f;
3682
3683 hash = CSR_READ(sc, mta_reg + (reg << 2));
3684 hash |= 1U << bit;
3685
3686 /* XXX Hardware bug?? */
3687 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3688 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3689 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3690 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3691 } else
3692 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3693
3694 ETHER_NEXT_MULTI(step, enm);
3695 }
3696
3697 ifp->if_flags &= ~IFF_ALLMULTI;
3698 goto setit;
3699
3700 allmulti:
3701 ifp->if_flags |= IFF_ALLMULTI;
3702 sc->sc_rctl |= RCTL_MPE;
3703
3704 setit:
3705 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3706 }
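
/*
 * Continuing the wm_mchash() example above: hash 0x105 selects MTA
 * word (0x105 >> 5) & 0x7f = 8 and bit 0x105 & 0x1f = 5, i.e. bit 5
 * of the ninth 32-bit word of the 4096-bit multicast table.
 */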
3707
3708 /*
3709 * wm_tbi_mediainit:
3710 *
3711 * Initialize media for use on 1000BASE-X devices.
3712 */
3713 static void
3714 wm_tbi_mediainit(struct wm_softc *sc)
3715 {
3716 const char *sep = "";
3717
3718 if (sc->sc_type < WM_T_82543)
3719 sc->sc_tipg = TIPG_WM_DFLT;
3720 else
3721 sc->sc_tipg = TIPG_LG_DFLT;
3722
3723 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3724 wm_tbi_mediastatus);
3725
3726 /*
3727 * SWD Pins:
3728 *
3729 * 0 = Link LED (output)
3730 * 1 = Loss Of Signal (input)
3731 */
3732 sc->sc_ctrl |= CTRL_SWDPIO(0);
3733 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3734
3735 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3736
3737 #define ADD(ss, mm, dd) \
3738 do { \
3739 aprint_normal("%s%s", sep, ss); \
3740 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3741 sep = ", "; \
3742 } while (/*CONSTCOND*/0)
3743
3744 aprint_normal("%s: ", sc->sc_dev.dv_xname);
3745 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3746 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3747 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3748 aprint_normal("\n");
3749
3750 #undef ADD
3751
3752 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3753 }
3754
3755 /*
3756 * wm_tbi_mediastatus: [ifmedia interface function]
3757 *
3758 * Get the current interface media status on a 1000BASE-X device.
3759 */
3760 static void
3761 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3762 {
3763 struct wm_softc *sc = ifp->if_softc;
3764 uint32_t ctrl;
3765
3766 ifmr->ifm_status = IFM_AVALID;
3767 ifmr->ifm_active = IFM_ETHER;
3768
3769 if (sc->sc_tbi_linkup == 0) {
3770 ifmr->ifm_active |= IFM_NONE;
3771 return;
3772 }
3773
3774 ifmr->ifm_status |= IFM_ACTIVE;
3775 ifmr->ifm_active |= IFM_1000_SX;
3776 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3777 ifmr->ifm_active |= IFM_FDX;
3778 ctrl = CSR_READ(sc, WMREG_CTRL);
3779 if (ctrl & CTRL_RFCE)
3780 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3781 if (ctrl & CTRL_TFCE)
3782 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3783 }
3784
3785 /*
3786 * wm_tbi_mediachange: [ifmedia interface function]
3787 *
3788 * Set hardware to newly-selected media on a 1000BASE-X device.
3789 */
3790 static int
3791 wm_tbi_mediachange(struct ifnet *ifp)
3792 {
3793 struct wm_softc *sc = ifp->if_softc;
3794 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3795 uint32_t status;
3796 int i;
3797
3798 sc->sc_txcw = ife->ifm_data;
3799 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3800 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3801 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3802 sc->sc_txcw |= TXCW_ANE;
3803
3804 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3805 delay(10000);
3806
3807 /* NOTE: CTRL will update TFCE and RFCE automatically. */
3808
3809 sc->sc_tbi_anstate = 0;
3810
3811 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3812 /* Have signal; wait for the link to come up. */
3813 for (i = 0; i < 50; i++) {
3814 delay(10000);
3815 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3816 break;
3817 }
3818
3819 status = CSR_READ(sc, WMREG_STATUS);
3820 if (status & STATUS_LU) {
3821 /* Link is up. */
3822 DPRINTF(WM_DEBUG_LINK,
3823 ("%s: LINK: set media -> link up %s\n",
3824 sc->sc_dev.dv_xname,
3825 (status & STATUS_FD) ? "FDX" : "HDX"));
3826 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3827 sc->sc_fcrtl &= ~FCRTL_XONE;
3828 if (status & STATUS_FD)
3829 sc->sc_tctl |=
3830 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3831 else
3832 sc->sc_tctl |=
3833 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3834 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
3835 sc->sc_fcrtl |= FCRTL_XONE;
3836 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3837 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3838 WMREG_OLD_FCRTL : WMREG_FCRTL,
3839 sc->sc_fcrtl);
3840 sc->sc_tbi_linkup = 1;
3841 } else {
3842 /* Link is down. */
3843 DPRINTF(WM_DEBUG_LINK,
3844 ("%s: LINK: set media -> link down\n",
3845 sc->sc_dev.dv_xname));
3846 sc->sc_tbi_linkup = 0;
3847 }
3848 } else {
3849 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3850 sc->sc_dev.dv_xname));
3851 sc->sc_tbi_linkup = 0;
3852 }
3853
3854 wm_tbi_set_linkled(sc);
3855
3856 return (0);
3857 }
3858
3859 /*
3860 * wm_tbi_set_linkled:
3861 *
3862 * Update the link LED on 1000BASE-X devices.
3863 */
3864 static void
3865 wm_tbi_set_linkled(struct wm_softc *sc)
3866 {
3867
3868 if (sc->sc_tbi_linkup)
3869 sc->sc_ctrl |= CTRL_SWDPIN(0);
3870 else
3871 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3872
3873 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3874 }
3875
3876 /*
3877 * wm_tbi_check_link:
3878 *
3879 * Check the link on 1000BASE-X devices.
3880 */
3881 static void
3882 wm_tbi_check_link(struct wm_softc *sc)
3883 {
3884 uint32_t rxcw, ctrl, status;
3885
3886 if (sc->sc_tbi_anstate == 0)
3887 return;
3888 else if (sc->sc_tbi_anstate > 1) {
3889 DPRINTF(WM_DEBUG_LINK,
3890 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3891 sc->sc_tbi_anstate));
3892 sc->sc_tbi_anstate--;
3893 return;
3894 }
3895
3896 sc->sc_tbi_anstate = 0;
3897
3898 rxcw = CSR_READ(sc, WMREG_RXCW);
3899 ctrl = CSR_READ(sc, WMREG_CTRL);
3900 status = CSR_READ(sc, WMREG_STATUS);
3901
3902 if ((status & STATUS_LU) == 0) {
3903 DPRINTF(WM_DEBUG_LINK,
3904 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3905 sc->sc_tbi_linkup = 0;
3906 } else {
3907 DPRINTF(WM_DEBUG_LINK,
3908 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3909 (status & STATUS_FD) ? "FDX" : "HDX"));
3910 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3911 sc->sc_fcrtl &= ~FCRTL_XONE;
3912 if (status & STATUS_FD)
3913 sc->sc_tctl |=
3914 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3915 else
3916 sc->sc_tctl |=
3917 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3918 if (ctrl & CTRL_TFCE)
3919 sc->sc_fcrtl |= FCRTL_XONE;
3920 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3921 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3922 WMREG_OLD_FCRTL : WMREG_FCRTL,
3923 sc->sc_fcrtl);
3924 sc->sc_tbi_linkup = 1;
3925 }
3926
3927 wm_tbi_set_linkled(sc);
3928 }
3929
3930 /*
3931 * wm_gmii_reset:
3932 *
3933 * Reset the PHY.
3934 */
3935 static void
3936 wm_gmii_reset(struct wm_softc *sc)
3937 {
3938 uint32_t reg;
3939 int func = 0; /* XXX gcc */
3940
3941 if (sc->sc_type >= WM_T_80003) {
3942 func = (CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1;
3943 if (wm_get_swfw_semaphore(sc,
3944 func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
3945 return;
3946 }
3947 if (sc->sc_type >= WM_T_82544) {
3948 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3949 delay(20000);
3950
3951 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3952 delay(20000);
3953 } else {
3954 /* The PHY reset pin is active-low. */
3955 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3956 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3957 CTRL_EXT_SWDPIN(4));
3958 reg |= CTRL_EXT_SWDPIO(4);
3959
3960 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3961 delay(10);
3962
3963 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3964 delay(10);
3965
3966 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3967 delay(10);
3968 #if 0
3969 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3970 #endif
3971 }
3972 if (sc->sc_type >= WM_T_80003)
3973 wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
3974 }
3975
3976 /*
3977 * wm_gmii_mediainit:
3978 *
3979 * Initialize media for use on 1000BASE-T devices.
3980 */
3981 static void
3982 wm_gmii_mediainit(struct wm_softc *sc)
3983 {
3984 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3985
3986 /* We have MII. */
3987 sc->sc_flags |= WM_F_HAS_MII;
3988
3989 if (sc->sc_type >= WM_T_80003)
3990 sc->sc_tipg = TIPG_1000T_80003_DFLT;
3991 else
3992 sc->sc_tipg = TIPG_1000T_DFLT;
3993
	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 * XXXbouyer - I'm not sure this is right for the 80003;
	 * the em driver only sets CTRL_SLU here, but it seems to work.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_80003) {
		sc->sc_mii.mii_readreg = wm_gmii_i80003_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i80003_writereg;
	} else if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, MIIF_DOPAUSE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
static void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
	    sc->sc_flowflags;
}

/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
static int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;

	if (ifp->if_flags & IFF_UP) {
		sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
		sc->sc_ctrl |= CTRL_SLU;
		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
			sc->sc_ctrl |= CTRL_ASDE;
		} else {
			sc->sc_ctrl &= ~CTRL_ASDE;
			sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
			if (ife->ifm_media & IFM_FDX)
				sc->sc_ctrl |= CTRL_FD;
			switch (IFM_SUBTYPE(ife->ifm_media)) {
			case IFM_10_T:
				sc->sc_ctrl |= CTRL_SPEED_10;
				break;
			case IFM_100_TX:
				sc->sc_ctrl |= CTRL_SPEED_100;
				break;
			case IFM_1000_T:
				sc->sc_ctrl |= CTRL_SPEED_1000;
				break;
			default:
				panic("wm_gmii_mediachange: bad media 0x%x",
				    ife->ifm_media);
			}
		}
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		mii_mediachg(&sc->sc_mii);
	}
	return (0);
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

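/*
 * i82543_mii_sendbits:
 *
 *	Clock the given bits (most significant bit first) out to the
 *	PHY over the bit-banged MDIO interface.
 */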
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

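/*
 * i82543_mii_recvbits:
 *
 *	Clock a 16-bit value in from the PHY over the bit-banged MDIO
 *	interface, with a turnaround cycle before and after the data.
 */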
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

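	/*
	 * MDIO read frame, clocked out MSB first: a preamble of
	 * 32 ones, then <13:12> start, <11:10> read opcode,
	 * <9:5> PHY address, <4:0> register; the PHY then drives
	 * 16 bits of data back.
	 */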
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

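	/*
	 * MDIO write frame, clocked out MSB first: a preamble of
	 * 32 ones, then <31:30> start, <29:28> write opcode,
	 * <27:23> PHY address, <22:18> register, <17:16> turnaround,
	 * <15:0> data.
	 */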
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic = 0;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 320; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_i80003_readreg:	[mii interface function]
 *
 *	Read a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static int
wm_gmii_i80003_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (phy != 1)	/* only one PHY on the Kumeran bus */
		return 0;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

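	/*
	 * The GG82563's registers are paged: the page number is
	 * carried in the upper bits of `reg' and must be latched
	 * through the appropriate PAGE_SELECT register before the
	 * register itself is accessed.
	 */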
	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}

/*
 * wm_gmii_i80003_writereg:	[mii interface function]
 *
 *	Write a PHY register on the Kumeran interface.
 *	This could be handled by the PHY layer if we didn't have to lock
 *	the resource ...
 */
static void
wm_gmii_i80003_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (phy != 1)	/* only one PHY on the Kumeran bus */
		return;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
		    reg >> GG82563_PAGE_SHIFT);
	} else {
		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
		    reg >> GG82563_PAGE_SHIFT);
	}

	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}


/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;
	struct mii_data *mii = &sc->sc_mii;

	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	sc->sc_fcrtl &= ~FCRTL_XONE;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
			sc->sc_ctrl |= CTRL_TFCE;
			sc->sc_fcrtl |= FCRTL_XONE;
		}
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			sc->sc_ctrl |= CTRL_RFCE;
	}

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
	    : WMREG_FCRTL, sc->sc_fcrtl);
	if (sc->sc_type >= WM_T_80003) {
		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
		case IFM_1000_T:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
			break;
		default:
			wm_kmrn_i80003_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
			break;
		}
		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
	}
}


/*
 * wm_kmrn_i80003_readreg:
 *
 *	Read a Kumeran register.
 */
static int
wm_kmrn_i80003_readreg(struct wm_softc *sc, int reg)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);
	int rv;

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return 0;

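	/*
	 * Writing the register offset with the read-enable (REN) bit
	 * set starts a read cycle; after a short delay the result can
	 * be read back from the data field of KUMCTRLSTA.
	 */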
	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    KUMCTRLSTA_REN);
	delay(2);

	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
	return (rv);
}


/*
 * wm_kmrn_i80003_writereg:
 *
 *	Write a Kumeran register.
 */
static void
wm_kmrn_i80003_writereg(struct wm_softc *sc, int reg, int val)
{
	int func = ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1);

	if (wm_get_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM))
		return;

	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
	    (val & KUMCTRLSTA_MASK));
	wm_put_swfw_semaphore(sc, func ? SWFW_PHY1_SM : SWFW_PHY0_SM);
}

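/*
 * wm_is_onboard_nvm_eeprom:
 *
 *	Return non-zero if the NVM is an EEPROM; on the 82573, return
 *	zero if the device instead boots from on-board Flash.
 */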
static int
wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
{
	uint32_t eecd = 0;

	if (sc->sc_type == WM_T_82573) {
		eecd = CSR_READ(sc, WMREG_EECD);

		/* Isolate bits 15 & 16 */
		eecd = ((eecd >> 15) & 0x03);

		/* If both bits are set, device is Flash type */
		if (eecd == 0x03)
			return 0;
	}
	return 1;
}

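/*
 * wm_get_swsm_semaphore:
 *
 *	Acquire the software semaphore in the SWSM register.
 *	Returns 0 on success, non-zero on timeout.
 */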
static int
wm_get_swsm_semaphore(struct wm_softc *sc)
{
	int32_t timeout;
	uint32_t swsm;

	/* Get the FW semaphore. */
	timeout = 1000 + 1;	/* XXX */
	while (timeout) {
		swsm = CSR_READ(sc, WMREG_SWSM);
		swsm |= SWSM_SWESMBI;
		CSR_WRITE(sc, WMREG_SWSM, swsm);
		/* If we managed to set the bit, we got the semaphore. */
		swsm = CSR_READ(sc, WMREG_SWSM);
		if (swsm & SWSM_SWESMBI)
			break;

		delay(50);
		timeout--;
	}

	if (timeout == 0) {
		aprint_error("%s: could not acquire EEPROM GNT\n",
		    sc->sc_dev.dv_xname);
		/* Release semaphores */
		wm_put_swsm_semaphore(sc);
		return 1;
	}
	return 0;
}

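/*
 * wm_put_swsm_semaphore:
 *
 *	Release the software semaphore in the SWSM register.
 */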
static void
wm_put_swsm_semaphore(struct wm_softc *sc)
{
	uint32_t swsm;

	swsm = CSR_READ(sc, WMREG_SWSM);
	swsm &= ~(SWSM_SWESMBI);
	CSR_WRITE(sc, WMREG_SWSM, swsm);
}

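/*
 * wm_get_swfw_semaphore:
 *
 *	Acquire the software/firmware synchronization semaphore for
 *	the resources named by `mask'.  Returns 0 on success.
 */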
static int
wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;
	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;

	for (timeout = 0; timeout < 200; timeout++) {
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
			if (wm_get_swsm_semaphore(sc))
				return 1;
		}
		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
		if ((swfw_sync & (swmask | fwmask)) == 0) {
			swfw_sync |= swmask;
			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
				wm_put_swsm_semaphore(sc);
			return 0;
		}
		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
			wm_put_swsm_semaphore(sc);
		delay(5000);
	}
	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
	    sc->sc_dev.dv_xname, mask, swfw_sync);
	return 1;
}

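/*
 * wm_put_swfw_semaphore:
 *
 *	Release the software/firmware synchronization semaphore for
 *	the resources named by `mask'.
 */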
static void
wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
{
	uint32_t swfw_sync;

	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
		while (wm_get_swsm_semaphore(sc) != 0)
			continue;
	}
	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
		wm_put_swsm_semaphore(sc);
}
