/*	$NetBSD: if_wm.c,v 1.74 2004/07/15 15:21:57 tron Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.74 2004/07/15 15:21:57 tron Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 (16 for the i82547) of them at a time.  We allow up to
 * 40 DMA segments per packet (there have been reports of jumbo frame
 * packets with as many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
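/*
 * Illustrative note (not part of the original source): because
 * WM_NTXDESC and WM_TXQUEUELEN(sc) are powers of two, the masks above
 * wrap a ring index with a single AND instead of a modulo, e.g.:
 *
 *	WM_NEXTTX(254) == 255
 *	WM_NEXTTX(255) == 0
 */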

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
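
/*
 * Illustrative sketch (an assumption about usage, not original text):
 * sc_rxtailp always points at the m_next field of the last mbuf in the
 * chain (or at sc_rxhead when the chain is empty), so appending is O(1):
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead == NULL
 *	WM_RXCHAIN_LINK(sc, m1);	sc_rxhead == m1, sc_rxtail == m1
 *	WM_RXCHAIN_LINK(sc, m2);	m1->m_next == m2, sc_rxtail == m2
 */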

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */
#define	WM_F_CSA		0x80	/* bus is CSA */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
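
/*
 * Worked example (illustrative, not original text): with WM_NTXDESC
 * == 256, WM_CDTXSYNC(sc, 254, 4, ops) issues two bus_dmamap_sync()
 * calls, one for descriptors 254..255 and one for descriptors 0..1,
 * because the requested range wraps past the end of the ring.
 */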

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
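
/*
 * Worked example (illustrative, not original text): with
 * sc_align_tweak == 2, the 14-byte Ethernet header starts 2 bytes
 * into the cluster, so the IP header that follows it begins at
 * offset 16 -- a 4-byte boundary, which is what strict-alignment
 * platforms require.
 */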

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

765 /*
766 * Map the device. All devices support memory-mapped acccess,
767 * and it is really required for normal operation.
768 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
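	/*
	 * (Illustrative note, not original text: the (bus_size_t)
	 * 0x100000000ULL argument below is the bus_dmamem_alloc()
	 * "boundary" parameter; it requests memory that does not cross
	 * a 4GB boundary, which is how the single-4G-segment
	 * requirement above is enforced.)
	 */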
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data),
	    PAGE_SIZE, (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data),
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data,
	    sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
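	/*
	 * Illustrative note (not original text): sc_ee_addrbits sets the
	 * EEPROM capacity reported below, since an N-bit address selects
	 * 2^N 16-bit words -- e.g. 6 bits -> 64 words, 8 bits -> 256 words.
	 */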
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
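	/*
	 * (Illustrative note, not original text: the EEPROM stores the
	 * MAC address as three little-endian 16-bit words, so myea[] ==
	 * { 0x2301, 0x6745, 0xab89 } unpacks to 01:23:45:67:89:ab.)
	 */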

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
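	/*
	 * Worked example (illustrative, not original text): for an
	 * untagged IPv4 packet with a 20-byte IP header, offset == 14
	 * here, so IPCSS == 14 (start of the IP header), IPCSO == 24
	 * (ip_sum lives 10 bytes in), and IPCSE == 33 (last byte of
	 * the IP header).
	 */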

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
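		/*
		 * (Illustrative restatement, not original text: the "- 2"
		 * below reserves one descriptor for the TDT semantics
		 * mentioned above and one for a possible checksum context
		 * descriptor, so the packet's dm_nsegs data descriptors
		 * must fit in what remains.)
		 */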
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
1596 * incremented by 1 if we do checksum offload (a descriptor
1597 * is used to set the checksum context).
1598 */
1599 txs->txs_mbuf = m0;
1600 txs->txs_firstdesc = sc->sc_txnext;
1601 txs->txs_ndesc = dmamap->dm_nsegs;
1602
1603 /*
1604 * Set up checksum offload parameters for
1605 * this packet.
1606 */
1607 if (m0->m_pkthdr.csum_flags &
1608 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1609 if (wm_tx_cksum(sc, txs, &cksumcmd,
1610 &cksumfields) != 0) {
1611 /* Error message already displayed. */
1612 bus_dmamap_unload(sc->sc_dmat, dmamap);
1613 continue;
1614 }
1615 } else {
1616 cksumcmd = 0;
1617 cksumfields = 0;
1618 }
1619
1620 cksumcmd |= WTX_CMD_IDE;
1621
1622 /*
1623 * Initialize the transmit descriptor.
1624 */
1625 for (nexttx = sc->sc_txnext, seg = 0;
1626 seg < dmamap->dm_nsegs;
1627 seg++, nexttx = WM_NEXTTX(nexttx)) {
1628 wm_set_dma_addr(&sc->sc_txdescs[nexttx].wtx_addr,
1629 dmamap->dm_segs[seg].ds_addr);
1630 sc->sc_txdescs[nexttx].wtx_cmdlen =
1631 htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
1632 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
1633 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
1634 cksumfields;
1635 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
1636 lasttx = nexttx;
1637
1638 DPRINTF(WM_DEBUG_TX,
1639 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1640 sc->sc_dev.dv_xname, nexttx,
1641 (u_int)le32toh(dmamap->dm_segs[seg].ds_addr),
1642 (u_int)le32toh(dmamap->dm_segs[seg].ds_len)));
1643 }
1644
1645 KASSERT(lasttx != -1);
1646
1647 /*
1648 * Set up the command byte on the last descriptor of
1649 * the packet. If we're in the interrupt delay window,
1650 * delay the interrupt.
1651 */
1652 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1653 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1654
1655 #if 0 /* XXXJRT */
1656 /*
1657 * If VLANs are enabled and the packet has a VLAN tag, set
1658 * up the descriptor to encapsulate the packet for us.
1659 *
1660 * This is only valid on the last descriptor of the packet.
1661 */
1662 if (sc->sc_ethercom.ec_nvlans != 0 &&
1663 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1664 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1665 htole32(WTX_CMD_VLE);
1666 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
1667 = htole16(*(u_int *)(mtag + 1) & 0xffff);
1668 }
1669 #endif /* XXXJRT */
1670
1671 txs->txs_lastdesc = lasttx;
1672
1673 DPRINTF(WM_DEBUG_TX,
1674 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1675 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
1676
1677 /* Sync the descriptors we're using. */
1678 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1679 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1680
1681 /* Give the packet to the chip. */
1682 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1683
1684 DPRINTF(WM_DEBUG_TX,
1685 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1686
1687 DPRINTF(WM_DEBUG_TX,
1688 ("%s: TX: finished transmitting packet, job %d\n",
1689 sc->sc_dev.dv_xname, sc->sc_txsnext));
1690
1691 /* Advance the tx pointer. */
1692 sc->sc_txfree -= txs->txs_ndesc;
1693 sc->sc_txnext = nexttx;
1694
1695 sc->sc_txsfree--;
1696 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
1697
1698 #if NBPFILTER > 0
1699 /* Pass the packet to any BPF listeners. */
1700 if (ifp->if_bpf)
1701 bpf_mtap(ifp->if_bpf, m0);
1702 #endif /* NBPFILTER > 0 */
1703 }
1704
1705 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1706 /* No more slots; notify upper layer. */
1707 ifp->if_flags |= IFF_OACTIVE;
1708 }
1709
1710 if (sc->sc_txfree != ofree) {
1711 /* Set a watchdog timer in case the chip flakes out. */
1712 ifp->if_timer = 5;
1713 }
1714 }
1715
1716 /*
1717 * wm_watchdog: [ifnet interface function]
1718 *
1719 * Watchdog timer handler.
1720 */
1721 static void
1722 wm_watchdog(struct ifnet *ifp)
1723 {
1724 struct wm_softc *sc = ifp->if_softc;
1725
1726 /*
1727 * Since we're using delayed interrupts, sweep up
1728 * before we report an error.
1729 */
1730 wm_txintr(sc);
1731
1732 if (sc->sc_txfree != WM_NTXDESC) {
1733 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1734 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1735 sc->sc_txnext);
1736 ifp->if_oerrors++;
1737
1738 /* Reset the interface. */
1739 (void) wm_init(ifp);
1740 }
1741
1742 /* Try to get more packets going. */
1743 wm_start(ifp);
1744 }
1745
1746 /*
1747 * wm_ioctl: [ifnet interface function]
1748 *
1749 * Handle control requests from the operator.
1750 */
1751 static int
1752 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1753 {
1754 struct wm_softc *sc = ifp->if_softc;
1755 struct ifreq *ifr = (struct ifreq *) data;
1756 int s, error;
1757
1758 s = splnet();
1759
1760 switch (cmd) {
1761 case SIOCSIFMEDIA:
1762 case SIOCGIFMEDIA:
1763 /* Flow control requires full-duplex mode. */
1764 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1765 (ifr->ifr_media & IFM_FDX) == 0)
1766 ifr->ifr_media &= ~IFM_ETH_FMASK;
1767 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1768 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1769 /* We can do both TXPAUSE and RXPAUSE. */
1770 ifr->ifr_media |=
1771 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1772 }
1773 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1774 }
1775 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1776 break;
1777 default:
1778 error = ether_ioctl(ifp, cmd, data);
1779 if (error == ENETRESET) {
1780 /*
1781 * Multicast list has changed; set the hardware filter
1782 * accordingly.
1783 */
1784 wm_set_filter(sc);
1785 error = 0;
1786 }
1787 break;
1788 }
1789
1790 /* Try to get more packets going. */
1791 wm_start(ifp);
1792
1793 splx(s);
1794 return (error);
1795 }
1796
1797 /*
1798 * wm_intr:
1799 *
1800 * Interrupt service routine.
1801 */
1802 static int
1803 wm_intr(void *arg)
1804 {
1805 struct wm_softc *sc = arg;
1806 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1807 uint32_t icr;
1808 int wantinit, handled = 0;
1809
1810 for (wantinit = 0; wantinit == 0;) {
1811 icr = CSR_READ(sc, WMREG_ICR);
1812 if ((icr & sc->sc_icr) == 0)
1813 break;
1814
1815 #if 0 /*NRND > 0*/
1816 if (RND_ENABLED(&sc->rnd_source))
1817 rnd_add_uint32(&sc->rnd_source, icr);
1818 #endif
1819
1820 handled = 1;
1821
1822 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1823 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1824 DPRINTF(WM_DEBUG_RX,
1825 ("%s: RX: got Rx intr 0x%08x\n",
1826 sc->sc_dev.dv_xname,
1827 icr & (ICR_RXDMT0|ICR_RXT0)));
1828 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1829 }
1830 #endif
1831 wm_rxintr(sc);
1832
1833 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1834 if (icr & ICR_TXDW) {
1835 DPRINTF(WM_DEBUG_TX,
1836 ("%s: TX: got TXDW interrupt\n",
1837 sc->sc_dev.dv_xname));
1838 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1839 }
1840 #endif
1841 wm_txintr(sc);
1842
1843 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1844 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1845 wm_linkintr(sc, icr);
1846 }
1847
1848 if (icr & ICR_RXO) {
1849 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1850 wantinit = 1;
1851 }
1852 }
1853
1854 if (handled) {
1855 if (wantinit)
1856 wm_init(ifp);
1857
1858 /* Try to get more packets going. */
1859 wm_start(ifp);
1860 }
1861
1862 return (handled);
1863 }
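/*
 * Added note: ICR is read-to-clear on this family, so the CSR_READ at
 * the top of each pass both samples and acknowledges the pending
 * interrupt causes; the loop therefore just re-reads ICR until no
 * bits of interest remain, rather than writing an acknowledge
 * register.
 */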
1864
1865 /*
1866 * wm_txintr:
1867 *
1868 * Helper; handle transmit interrupts.
1869 */
1870 static void
1871 wm_txintr(struct wm_softc *sc)
1872 {
1873 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1874 struct wm_txsoft *txs;
1875 uint8_t status;
1876 int i;
1877
1878 ifp->if_flags &= ~IFF_OACTIVE;
1879
1880 /*
1881 * Go through the Tx list and free mbufs for those
1882 * frames which have been transmitted.
1883 */
1884 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
1885 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
1886 txs = &sc->sc_txsoft[i];
1887
1888 DPRINTF(WM_DEBUG_TX,
1889 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1890
1891 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1892 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1893
1894 status =
1895 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
1896 if ((status & WTX_ST_DD) == 0) {
1897 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1898 BUS_DMASYNC_PREREAD);
1899 break;
1900 }
1901
1902 DPRINTF(WM_DEBUG_TX,
1903 ("%s: TX: job %d done: descs %d..%d\n",
1904 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1905 txs->txs_lastdesc));
1906
1907 /*
1908 * XXX We should probably be using the statistics
1909 * XXX registers, but I don't know if they exist
1910 * XXX on chips before the i82544.
1911 */
1912
1913 #ifdef WM_EVENT_COUNTERS
1914 if (status & WTX_ST_TU)
1915 WM_EVCNT_INCR(&sc->sc_ev_tu);
1916 #endif /* WM_EVENT_COUNTERS */
1917
1918 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1919 ifp->if_oerrors++;
1920 if (status & WTX_ST_LC)
1921 printf("%s: late collision\n",
1922 sc->sc_dev.dv_xname);
1923 else if (status & WTX_ST_EC) {
1924 ifp->if_collisions += 16;
1925 printf("%s: excessive collisions\n",
1926 sc->sc_dev.dv_xname);
1927 }
1928 } else
1929 ifp->if_opackets++;
1930
1931 sc->sc_txfree += txs->txs_ndesc;
1932 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1933 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1934 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1935 m_freem(txs->txs_mbuf);
1936 txs->txs_mbuf = NULL;
1937 }
1938
1939 /* Update the dirty transmit buffer pointer. */
1940 sc->sc_txsdirty = i;
1941 DPRINTF(WM_DEBUG_TX,
1942 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1943
1944 /*
1945 * If there are no more pending transmissions, cancel the watchdog
1946 * timer.
1947 */
1948 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
1949 ifp->if_timer = 0;
1950 }
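/*
 * Worked example of the sweep above (hypothetical numbers): with a
 * 64-entry job queue, txsdirty = 62 and txsfree = 61, the loop visits
 * jobs 62, 63 and 0 (WM_NEXTTXS wraps modulo the queue length) and
 * stops at the first job whose last descriptor has not yet been
 * marked WTX_ST_DD (descriptor done) by the chip; each completed job
 * has its DMA map unloaded and its mbuf freed.
 */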
1951
1952 /*
1953 * wm_rxintr:
1954 *
1955 * Helper; handle receive interrupts.
1956 */
1957 static void
1958 wm_rxintr(struct wm_softc *sc)
1959 {
1960 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1961 struct wm_rxsoft *rxs;
1962 struct mbuf *m;
1963 int i, len;
1964 uint8_t status, errors;
1965
1966 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1967 rxs = &sc->sc_rxsoft[i];
1968
1969 DPRINTF(WM_DEBUG_RX,
1970 ("%s: RX: checking descriptor %d\n",
1971 sc->sc_dev.dv_xname, i));
1972
1973 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1974
1975 status = sc->sc_rxdescs[i].wrx_status;
1976 errors = sc->sc_rxdescs[i].wrx_errors;
1977 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1978
1979 if ((status & WRX_ST_DD) == 0) {
1980 /*
1981 * We have processed all of the receive descriptors.
1982 */
1983 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1984 break;
1985 }
1986
1987 if (__predict_false(sc->sc_rxdiscard)) {
1988 DPRINTF(WM_DEBUG_RX,
1989 ("%s: RX: discarding contents of descriptor %d\n",
1990 sc->sc_dev.dv_xname, i));
1991 WM_INIT_RXDESC(sc, i);
1992 if (status & WRX_ST_EOP) {
1993 /* Reset our state. */
1994 DPRINTF(WM_DEBUG_RX,
1995 ("%s: RX: resetting rxdiscard -> 0\n",
1996 sc->sc_dev.dv_xname));
1997 sc->sc_rxdiscard = 0;
1998 }
1999 continue;
2000 }
2001
2002 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2003 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
2004
2005 m = rxs->rxs_mbuf;
2006
2007 /*
2008 * Add a new receive buffer to the ring.
2009 */
2010 if (wm_add_rxbuf(sc, i) != 0) {
2011 /*
2012 * Failed, throw away what we've done so
2013 * far, and discard the rest of the packet.
2014 */
2015 ifp->if_ierrors++;
2016 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2017 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2018 WM_INIT_RXDESC(sc, i);
2019 if ((status & WRX_ST_EOP) == 0)
2020 sc->sc_rxdiscard = 1;
2021 if (sc->sc_rxhead != NULL)
2022 m_freem(sc->sc_rxhead);
2023 WM_RXCHAIN_RESET(sc);
2024 DPRINTF(WM_DEBUG_RX,
2025 ("%s: RX: Rx buffer allocation failed, "
2026 "dropping packet%s\n", sc->sc_dev.dv_xname,
2027 sc->sc_rxdiscard ? " (discard)" : ""));
2028 continue;
2029 }
2030
2031 WM_RXCHAIN_LINK(sc, m);
2032
2033 m->m_len = len;
2034
2035 DPRINTF(WM_DEBUG_RX,
2036 ("%s: RX: buffer at %p len %d\n",
2037 sc->sc_dev.dv_xname, m->m_data, len));
2038
2039 /*
2040 * If this is not the end of the packet, keep
2041 * looking.
2042 */
2043 if ((status & WRX_ST_EOP) == 0) {
2044 sc->sc_rxlen += len;
2045 DPRINTF(WM_DEBUG_RX,
2046 ("%s: RX: not yet EOP, rxlen -> %d\n",
2047 sc->sc_dev.dv_xname, sc->sc_rxlen));
2048 continue;
2049 }
2050
2051 /*
2052 * Okay, we have the entire packet now...
2053 */
2054 *sc->sc_rxtailp = NULL;
2055 m = sc->sc_rxhead;
2056 len += sc->sc_rxlen;
2057
2058 WM_RXCHAIN_RESET(sc);
2059
2060 DPRINTF(WM_DEBUG_RX,
2061 ("%s: RX: have entire packet, len -> %d\n",
2062 sc->sc_dev.dv_xname, len));
2063
2064 /*
2065 * If an error occurred, update stats and drop the packet.
2066 */
2067 if (errors &
2068 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2069 ifp->if_ierrors++;
2070 if (errors & WRX_ER_SE)
2071 printf("%s: symbol error\n",
2072 sc->sc_dev.dv_xname);
2073 else if (errors & WRX_ER_SEQ)
2074 printf("%s: receive sequence error\n",
2075 sc->sc_dev.dv_xname);
2076 else if (errors & WRX_ER_CE)
2077 printf("%s: CRC error\n",
2078 sc->sc_dev.dv_xname);
2079 m_freem(m);
2080 continue;
2081 }
2082
2083 /*
2084 * No errors. Receive the packet.
2085 *
2086 * Note: we have configured the chip to include the
2087 * CRC with every packet.
2088 */
2089 m->m_flags |= M_HASFCS;
2090 m->m_pkthdr.rcvif = ifp;
2091 m->m_pkthdr.len = len;
2092
2093 #if 0 /* XXXJRT */
2094 /*
2095 * If VLANs are enabled, VLAN packets have been unwrapped
2096 * for us. Associate the tag with the packet.
2097 */
2098 if (sc->sc_ethercom.ec_nvlans != 0 &&
2099 (status & WRX_ST_VP) != 0) {
2100 struct m_tag *vtag;
2101
2102 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2103 M_NOWAIT);
2104 if (vtag == NULL) {
2105 ifp->if_ierrors++;
2106 printf("%s: unable to allocate VLAN tag\n",
2107 sc->sc_dev.dv_xname);
2108 m_freem(m);
2109 continue;
2110 }
2111
2112 *(u_int *)(vtag + 1) =
2113 le16toh(sc->sc_rxdescs[i].wrx_special);
2114 }
2115 #endif /* XXXJRT */
2116
2117 /*
2118 * Set up checksum info for this packet.
2119 */
2120 if (status & WRX_ST_IPCS) {
2121 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2122 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2123 if (errors & WRX_ER_IPE)
2124 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2125 }
2126 if (status & WRX_ST_TCPCS) {
2127 /*
2128 * Note: we don't know if this was TCP or UDP,
2129 * so we just set both bits, and expect the
2130 * upper layers to deal.
2131 */
2132 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2133 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2134 if (errors & WRX_ER_TCPE)
2135 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2136 }
2137
2138 ifp->if_ipackets++;
2139
2140 #if NBPFILTER > 0
2141 /* Pass this up to any BPF listeners. */
2142 if (ifp->if_bpf)
2143 bpf_mtap(ifp->if_bpf, m);
2144 #endif /* NBPFILTER > 0 */
2145
2146 /* Pass it on. */
2147 (*ifp->if_input)(ifp, m);
2148 }
2149
2150 /* Update the receive pointer. */
2151 sc->sc_rxptr = i;
2152
2153 DPRINTF(WM_DEBUG_RX,
2154 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2155 }
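/*
 * Worked example (hypothetical numbers): a 3000-byte frame received
 * into 2048-byte clusters occupies two descriptors.  On the first one
 * WRX_ST_EOP is clear, so the 2048-byte mbuf is chained and sc_rxlen
 * becomes 2048; on the second, EOP is set with a descriptor length of
 * 952, so the assembled packet is handed up with
 * pkthdr.len = 952 + 2048 = 3000 (CRC included, hence M_HASFCS).
 */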
2156
2157 /*
2158 * wm_linkintr:
2159 *
2160 * Helper; handle link interrupts.
2161 */
2162 static void
2163 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2164 {
2165 uint32_t status;
2166
2167 /*
2168 * If we get a link status interrupt on a 1000BASE-T
2169 * device, just fall into the normal MII tick path.
2170 */
2171 if (sc->sc_flags & WM_F_HAS_MII) {
2172 if (icr & ICR_LSC) {
2173 DPRINTF(WM_DEBUG_LINK,
2174 ("%s: LINK: LSC -> mii_tick\n",
2175 sc->sc_dev.dv_xname));
2176 mii_tick(&sc->sc_mii);
2177 } else if (icr & ICR_RXSEQ) {
2178 DPRINTF(WM_DEBUG_LINK,
2179 ("%s: LINK Receive sequence error\n",
2180 sc->sc_dev.dv_xname));
2181 }
2182 return;
2183 }
2184
2185 /*
2186 * If we are now receiving /C/, check for link again in
2187 * a couple of link clock ticks.
2188 */
2189 if (icr & ICR_RXCFG) {
2190 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2191 sc->sc_dev.dv_xname));
2192 sc->sc_tbi_anstate = 2;
2193 }
2194
2195 if (icr & ICR_LSC) {
2196 status = CSR_READ(sc, WMREG_STATUS);
2197 if (status & STATUS_LU) {
2198 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2199 sc->sc_dev.dv_xname,
2200 (status & STATUS_FD) ? "FDX" : "HDX"));
2201 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2202 sc->sc_fcrtl &= ~FCRTL_XONE;
2203 if (status & STATUS_FD)
2204 sc->sc_tctl |=
2205 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2206 else
2207 sc->sc_tctl |=
2208 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2209 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2210 sc->sc_fcrtl |= FCRTL_XONE;
2211 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2212 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2213 WMREG_OLD_FCRTL : WMREG_FCRTL,
2214 sc->sc_fcrtl);
2215 sc->sc_tbi_linkup = 1;
2216 } else {
2217 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2218 sc->sc_dev.dv_xname));
2219 sc->sc_tbi_linkup = 0;
2220 }
2221 sc->sc_tbi_anstate = 2;
2222 wm_tbi_set_linkled(sc);
2223 } else if (icr & ICR_RXSEQ) {
2224 DPRINTF(WM_DEBUG_LINK,
2225 ("%s: LINK: Receive sequence error\n",
2226 sc->sc_dev.dv_xname));
2227 }
2228 }
2229
2230 /*
2231 * wm_tick:
2232 *
2233 * One second timer, used to check link status, sweep up
2234 * completed transmit jobs, etc.
2235 */
2236 static void
2237 wm_tick(void *arg)
2238 {
2239 struct wm_softc *sc = arg;
2240 int s;
2241
2242 s = splnet();
2243
2244 if (sc->sc_type >= WM_T_82542_2_1) {
2245 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2246 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2247 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2248 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2249 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2250 }
2251
2252 if (sc->sc_flags & WM_F_HAS_MII)
2253 mii_tick(&sc->sc_mii);
2254 else
2255 wm_tbi_check_link(sc);
2256
2257 splx(s);
2258
2259 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2260 }
2261
2262 /*
2263 * wm_reset:
2264 *
2265 * Reset the i8254x chip.
2266 */
2267 static void
2268 wm_reset(struct wm_softc *sc)
2269 {
2270 int i;
2271
2272 switch (sc->sc_type) {
2273 case WM_T_82544:
2274 case WM_T_82540:
2275 case WM_T_82545:
2276 case WM_T_82546:
2277 case WM_T_82541:
2278 case WM_T_82541_2:
2279 /*
2280 * These chips have a problem with the memory-mapped
2281 * write cycle when issuing the reset, so use I/O-mapped
2282 * access, if possible.
2283 */
2284 if (sc->sc_flags & WM_F_IOH_VALID)
2285 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2286 else
2287 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2288 break;
2289
2290 case WM_T_82545_3:
2291 case WM_T_82546_3:
2292 /* Use the shadow control register on these chips. */
2293 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2294 break;
2295
2296 default:
2297 /* Everything else can safely use the documented method. */
2298 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2299 break;
2300 }
2301 delay(10000);
2302
2303 for (i = 0; i < 1000; i++) {
2304 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2305 return;
2306 delay(20);
2307 }
2308
2309 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2310 printf("%s: WARNING: reset failed to complete\n",
2311 sc->sc_dev.dv_xname);
2312 }
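/*
 * Timing note: CTRL_RST is self-clearing.  After the initial 10ms
 * settle, the loop above polls for up to a further 1000 * 20us = 20ms
 * before the reset is declared to have failed.
 */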
2313
2314 /*
2315 * wm_init: [ifnet interface function]
2316 *
2317 * Initialize the interface. Must be called at splnet().
2318 */
2319 static int
2320 wm_init(struct ifnet *ifp)
2321 {
2322 struct wm_softc *sc = ifp->if_softc;
2323 struct wm_rxsoft *rxs;
2324 int i, error = 0;
2325 uint32_t reg;
2326
2327 /*
2328 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2329 * There is a small but measurable benefit to avoiding the adjustment
2330 * of the descriptor so that the headers are aligned, for normal mtu,
2331 * on such platforms. One possibility is that the DMA itself is
2332 * slightly more efficient if the front of the entire packet (instead
2333 * of the front of the headers) is aligned.
2334 *
2335 * Note we must always set align_tweak to 0 if we are using
2336 * jumbo frames.
2337 */
2338 #ifdef __NO_STRICT_ALIGNMENT
2339 sc->sc_align_tweak = 0;
2340 #else
2341 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2342 sc->sc_align_tweak = 0;
2343 else
2344 sc->sc_align_tweak = 2;
2345 #endif /* __NO_STRICT_ALIGNMENT */
2346
2347 /* Cancel any pending I/O. */
2348 wm_stop(ifp, 0);
2349
2350 /* Reset the chip to a known state. */
2351 wm_reset(sc);
2352
2353 /* Initialize the transmit descriptor ring. */
2354 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2355 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2356 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2357 sc->sc_txfree = WM_NTXDESC;
2358 sc->sc_txnext = 0;
2359
2360 sc->sc_txctx_ipcs = 0xffffffff;
2361 sc->sc_txctx_tucs = 0xffffffff;
2362
2363 if (sc->sc_type < WM_T_82543) {
2364 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2365 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2366 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2367 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2368 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2369 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2370 } else {
2371 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2372 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2373 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2374 CSR_WRITE(sc, WMREG_TDH, 0);
2375 CSR_WRITE(sc, WMREG_TDT, 0);
2376 CSR_WRITE(sc, WMREG_TIDV, 128);
2377
2378 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2379 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2380 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2381 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2382 }
2383 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2384 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2385
2386 /* Initialize the transmit job descriptors. */
2387 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
2388 sc->sc_txsoft[i].txs_mbuf = NULL;
2389 sc->sc_txsfree = WM_TXQUEUELEN(sc);
2390 sc->sc_txsnext = 0;
2391 sc->sc_txsdirty = 0;
2392
2393 /*
2394 * Initialize the receive descriptor and receive job
2395 * descriptor rings.
2396 */
2397 if (sc->sc_type < WM_T_82543) {
2398 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
2399 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
2400 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2401 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2402 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2403 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2404
2405 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2406 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2407 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2408 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2409 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2410 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2411 } else {
2412 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
2413 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
2414 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2415 CSR_WRITE(sc, WMREG_RDH, 0);
2416 CSR_WRITE(sc, WMREG_RDT, 0);
2417 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2418 }
2419 for (i = 0; i < WM_NRXDESC; i++) {
2420 rxs = &sc->sc_rxsoft[i];
2421 if (rxs->rxs_mbuf == NULL) {
2422 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2423 printf("%s: unable to allocate or map rx "
2424 "buffer %d, error = %d\n",
2425 sc->sc_dev.dv_xname, i, error);
2426 /*
2427 * XXX Should attempt to run with fewer receive
2428 * XXX buffers instead of just failing.
2429 */
2430 wm_rxdrain(sc);
2431 goto out;
2432 }
2433 } else
2434 WM_INIT_RXDESC(sc, i);
2435 }
2436 sc->sc_rxptr = 0;
2437 sc->sc_rxdiscard = 0;
2438 WM_RXCHAIN_RESET(sc);
2439
2440 /*
2441 * Clear out the VLAN table -- we don't use it (yet).
2442 */
2443 CSR_WRITE(sc, WMREG_VET, 0);
2444 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2445 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2446
2447 /*
2448 * Set up flow-control parameters.
2449 *
2450 * XXX Values could probably stand some tuning.
2451 */
2452 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2453 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2454 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2455
2456 sc->sc_fcrtl = FCRTL_DFLT;
2457 if (sc->sc_type < WM_T_82543) {
2458 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2459 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2460 } else {
2461 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2462 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2463 }
2464 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2465
2466 #if 0 /* XXXJRT */
2467 /* Deal with VLAN enables. */
2468 if (sc->sc_ethercom.ec_nvlans != 0)
2469 sc->sc_ctrl |= CTRL_VME;
2470 else
2471 #endif /* XXXJRT */
2472 sc->sc_ctrl &= ~CTRL_VME;
2473
2474 /* Write the control registers. */
2475 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2476 #if 0
2477 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2478 #endif
2479
2480 /*
2481 * Set up checksum offload parameters.
2482 */
2483 reg = CSR_READ(sc, WMREG_RXCSUM);
2484 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2485 reg |= RXCSUM_IPOFL;
2486 else
2487 reg &= ~RXCSUM_IPOFL;
2488 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2489 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2490 else {
2491 reg &= ~RXCSUM_TUOFL;
2492 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2493 reg &= ~RXCSUM_IPOFL;
2494 }
2495 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2496
2497 /*
2498 * Set up the interrupt registers.
2499 */
2500 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2501 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2502 ICR_RXO | ICR_RXT0;
2503 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2504 sc->sc_icr |= ICR_RXCFG;
2505 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2506
2507 /* Set up the inter-packet gap. */
2508 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2509
2510 #if 0 /* XXXJRT */
2511 /* Set the VLAN ethernetype. */
2512 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2513 #endif
2514
2515 /*
2516 * Set up the transmit control register; we start out with
2517 * a collision distance suitable for FDX, but update it when
2518 * we resolve the media type.
2519 */
2520 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2521 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2522 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2523
2524 /* Set the media. */
2525 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2526
2527 /*
2528 * Set up the receive control register; we actually program
2529 * the register when we set the receive filter. Use multicast
2530 * address offset type 0.
2531 *
2532 * Only the i82544 has the ability to strip the incoming
2533 * CRC, so we don't enable that feature.
2534 */
2535 sc->sc_mchash_type = 0;
2536 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2537 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2538
2539 if (MCLBYTES == 2048) {
2540 sc->sc_rctl |= RCTL_2k;
2541 } else {
2542 if (sc->sc_type >= WM_T_82543) {
2543 switch (MCLBYTES) {
2544 case 4096:
2545 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2546 break;
2547 case 8192:
2548 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2549 break;
2550 case 16384:
2551 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2552 break;
2553 default:
2554 panic("wm_init: MCLBYTES %d unsupported",
2555 MCLBYTES);
2556 break;
2557 }
2558 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2559 }
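	/*
	 * Commentary on the above: with RCTL_BSEX clear, RCTL uses its
	 * base buffer-size encodings (RCTL_2k here); setting RCTL_BSEX
	 * scales the encodings up to the extended 4k/8k/16k sizes,
	 * which only the i82543 and later understand.  The i82542 is
	 * therefore restricted to 2048-byte clusters.
	 */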
2560
2561 /* Set the receive filter. */
2562 wm_set_filter(sc);
2563
2564 /* Start the one second link check clock. */
2565 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2566
2567 /* ...all done! */
2568 ifp->if_flags |= IFF_RUNNING;
2569 ifp->if_flags &= ~IFF_OACTIVE;
2570
2571 out:
2572 if (error)
2573 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2574 return (error);
2575 }
2576
2577 /*
2578 * wm_rxdrain:
2579 *
2580 * Drain the receive queue.
2581 */
2582 static void
2583 wm_rxdrain(struct wm_softc *sc)
2584 {
2585 struct wm_rxsoft *rxs;
2586 int i;
2587
2588 for (i = 0; i < WM_NRXDESC; i++) {
2589 rxs = &sc->sc_rxsoft[i];
2590 if (rxs->rxs_mbuf != NULL) {
2591 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2592 m_freem(rxs->rxs_mbuf);
2593 rxs->rxs_mbuf = NULL;
2594 }
2595 }
2596 }
2597
2598 /*
2599 * wm_stop: [ifnet interface function]
2600 *
2601 * Stop transmission on the interface.
2602 */
2603 static void
2604 wm_stop(struct ifnet *ifp, int disable)
2605 {
2606 struct wm_softc *sc = ifp->if_softc;
2607 struct wm_txsoft *txs;
2608 int i;
2609
2610 /* Stop the one second clock. */
2611 callout_stop(&sc->sc_tick_ch);
2612
2613 if (sc->sc_flags & WM_F_HAS_MII) {
2614 /* Down the MII. */
2615 mii_down(&sc->sc_mii);
2616 }
2617
2618 /* Stop the transmit and receive processes. */
2619 CSR_WRITE(sc, WMREG_TCTL, 0);
2620 CSR_WRITE(sc, WMREG_RCTL, 0);
2621
2622 /* Release any queued transmit buffers. */
2623 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2624 txs = &sc->sc_txsoft[i];
2625 if (txs->txs_mbuf != NULL) {
2626 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2627 m_freem(txs->txs_mbuf);
2628 txs->txs_mbuf = NULL;
2629 }
2630 }
2631
2632 if (disable)
2633 wm_rxdrain(sc);
2634
2635 /* Mark the interface as down and cancel the watchdog timer. */
2636 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2637 ifp->if_timer = 0;
2638 }
2639
2640 /*
2641 * wm_acquire_eeprom:
2642 *
2643 * Perform the EEPROM handshake required on some chips.
2644 */
2645 static int
2646 wm_acquire_eeprom(struct wm_softc *sc)
2647 {
2648 uint32_t reg;
2649 int x;
2650
2651 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2652 reg = CSR_READ(sc, WMREG_EECD);
2653
2654 /* Request EEPROM access. */
2655 reg |= EECD_EE_REQ;
2656 CSR_WRITE(sc, WMREG_EECD, reg);
2657
2658 /* ...and wait for it to be granted. */
2659 for (x = 0; x < 100; x++) {
2660 reg = CSR_READ(sc, WMREG_EECD);
2661 if (reg & EECD_EE_GNT)
2662 break;
2663 delay(5);
2664 }
2665 if ((reg & EECD_EE_GNT) == 0) {
2666 aprint_error("%s: could not acquire EEPROM GNT\n",
2667 sc->sc_dev.dv_xname);
2668 reg &= ~EECD_EE_REQ;
2669 CSR_WRITE(sc, WMREG_EECD, reg);
2670 return (1);
2671 }
2672 }
2673
2674 return (0);
2675 }
2676
2677 /*
2678 * wm_release_eeprom:
2679 *
2680 * Release the EEPROM handshake.
2681 */
2682 static void
2683 wm_release_eeprom(struct wm_softc *sc)
2684 {
2685 uint32_t reg;
2686
2687 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2688 reg = CSR_READ(sc, WMREG_EECD);
2689 reg &= ~EECD_EE_REQ;
2690 CSR_WRITE(sc, WMREG_EECD, reg);
2691 }
2692 }
2693
2694 /*
2695 * wm_eeprom_sendbits:
2696 *
2697 * Send a series of bits to the EEPROM.
2698 */
2699 static void
2700 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2701 {
2702 uint32_t reg;
2703 int x;
2704
2705 reg = CSR_READ(sc, WMREG_EECD);
2706
2707 for (x = nbits; x > 0; x--) {
2708 if (bits & (1U << (x - 1)))
2709 reg |= EECD_DI;
2710 else
2711 reg &= ~EECD_DI;
2712 CSR_WRITE(sc, WMREG_EECD, reg);
2713 delay(2);
2714 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2715 delay(2);
2716 CSR_WRITE(sc, WMREG_EECD, reg);
2717 delay(2);
2718 }
2719 }
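/*
 * Added note: each pass above presents one bit, most significant
 * first, on EECD_DI and then raises and lowers EECD_SK with 2us of
 * settle time around each edge, so the EEPROM samples DI on the
 * rising edge of its serial clock.
 */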
2720
2721 /*
2722 * wm_eeprom_recvbits:
2723 *
2724 * Receive a series of bits from the EEPROM.
2725 */
2726 static void
2727 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2728 {
2729 uint32_t reg, val;
2730 int x;
2731
2732 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2733
2734 val = 0;
2735 for (x = nbits; x > 0; x--) {
2736 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2737 delay(2);
2738 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2739 val |= (1U << (x - 1));
2740 CSR_WRITE(sc, WMREG_EECD, reg);
2741 delay(2);
2742 }
2743 *valp = val;
2744 }
2745
2746 /*
2747 * wm_read_eeprom_uwire:
2748 *
2749 * Read a word from the EEPROM using the MicroWire protocol.
2750 */
2751 static int
2752 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2753 {
2754 uint32_t reg, val;
2755 int i;
2756
2757 for (i = 0; i < wordcnt; i++) {
2758 /* Clear SK and DI. */
2759 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2760 CSR_WRITE(sc, WMREG_EECD, reg);
2761
2762 /* Set CHIP SELECT. */
2763 reg |= EECD_CS;
2764 CSR_WRITE(sc, WMREG_EECD, reg);
2765 delay(2);
2766
2767 /* Shift in the READ command. */
2768 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2769
2770 /* Shift in address. */
2771 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2772
2773 /* Shift out the data. */
2774 wm_eeprom_recvbits(sc, &val, 16);
2775 data[i] = val & 0xffff;
2776
2777 /* Clear CHIP SELECT. */
2778 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2779 CSR_WRITE(sc, WMREG_EECD, reg);
2780 delay(2);
2781 }
2782
2783 return (0);
2784 }
2785
2786 /*
2787 * wm_spi_eeprom_ready:
2788 *
2789 * Wait for a SPI EEPROM to be ready for commands.
2790 */
2791 static int
2792 wm_spi_eeprom_ready(struct wm_softc *sc)
2793 {
2794 uint32_t val;
2795 int usec;
2796
2797 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2798 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2799 wm_eeprom_recvbits(sc, &val, 8);
2800 if ((val & SPI_SR_RDY) == 0)
2801 break;
2802 }
2803 if (usec >= SPI_MAX_RETRIES) {
2804 aprint_error("%s: EEPROM failed to become ready\n",
2805 sc->sc_dev.dv_xname);
2806 return (1);
2807 }
2808 return (0);
2809 }
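/*
 * Added note (an interpretation): SPI_SR_RDY appears to be the
 * write-in-progress/busy bit of the SPI status register, so the part
 * is ready once it reads as zero; the loop allows it up to
 * SPI_MAX_RETRIES microseconds, polling in 5us steps.
 */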
2810
2811 /*
2812 * wm_read_eeprom_spi:
2813 *
2814 * Read a word from the EEPROM using the SPI protocol.
2815 */
2816 static int
2817 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2818 {
2819 uint32_t reg, val;
2820 int i;
2821 uint8_t opc;
2822
2823 /* Clear SK and CS. */
2824 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2825 CSR_WRITE(sc, WMREG_EECD, reg);
2826 delay(2);
2827
2828 if (wm_spi_eeprom_ready(sc))
2829 return (1);
2830
2831 /* Toggle CS to flush commands. */
2832 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2833 delay(2);
2834 CSR_WRITE(sc, WMREG_EECD, reg);
2835 delay(2);
2836
2837 opc = SPI_OPC_READ;
2838 if (sc->sc_ee_addrbits == 8 && word >= 128)
2839 opc |= SPI_OPC_A8;
2840
2841 wm_eeprom_sendbits(sc, opc, 8);
2842 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2843
2844 for (i = 0; i < wordcnt; i++) {
2845 wm_eeprom_recvbits(sc, &val, 16);
2846 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2847 }
2848
2849 /* Raise CS and clear SK. */
2850 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2851 CSR_WRITE(sc, WMREG_EECD, reg);
2852 delay(2);
2853
2854 return (0);
2855 }
2856
2857 /*
2858 * wm_read_eeprom:
2859 *
2860 * Read data from the serial EEPROM.
2861 */
2862 static int
2863 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2864 {
2865 int rv;
2866
2867 if (wm_acquire_eeprom(sc))
2868 return (1);
2869
2870 if (sc->sc_flags & WM_F_EEPROM_SPI)
2871 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2872 else
2873 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2874
2875 wm_release_eeprom(sc);
2876 return (rv);
2877 }
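/*
 * Illustrative use (a sketch, not compiled in): reading the station
 * address, as the attach code does with the EEPROM_OFF_MACADDR offset
 * from if_wmreg.h.  Each 16-bit word comes back with the low-order
 * byte first in the address.
 */
#if 0
	uint16_t myea[ETHER_ADDR_LEN / 2];
	uint8_t enaddr[ETHER_ADDR_LEN];
	int i;

	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea) == 0) {
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			enaddr[i * 2] = myea[i] & 0xff;
			enaddr[i * 2 + 1] = myea[i] >> 8;
		}
	}
#endif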
2878
2879 /*
2880 * wm_add_rxbuf:
2881 *
2882 * Add a receive buffer to the indicated descriptor.
2883 */
2884 static int
2885 wm_add_rxbuf(struct wm_softc *sc, int idx)
2886 {
2887 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2888 struct mbuf *m;
2889 int error;
2890
2891 MGETHDR(m, M_DONTWAIT, MT_DATA);
2892 if (m == NULL)
2893 return (ENOBUFS);
2894
2895 MCLGET(m, M_DONTWAIT);
2896 if ((m->m_flags & M_EXT) == 0) {
2897 m_freem(m);
2898 return (ENOBUFS);
2899 }
2900
2901 if (rxs->rxs_mbuf != NULL)
2902 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2903
2904 rxs->rxs_mbuf = m;
2905
2906 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2907 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2908 BUS_DMA_READ|BUS_DMA_NOWAIT);
2909 if (error) {
2910 printf("%s: unable to load rx DMA map %d, error = %d\n",
2911 sc->sc_dev.dv_xname, idx, error);
2912 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2913 }
2914
2915 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2916 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2917
2918 WM_INIT_RXDESC(sc, idx);
2919
2920 return (0);
2921 }
2922
2923 /*
2924 * wm_set_ral:
2925 *
2926 * Set an entry in the receive address list.
2927 */
2928 static void
2929 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2930 {
2931 uint32_t ral_lo, ral_hi;
2932
2933 if (enaddr != NULL) {
2934 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2935 (enaddr[3] << 24);
2936 ral_hi = enaddr[4] | (enaddr[5] << 8);
2937 ral_hi |= RAL_AV;
2938 } else {
2939 ral_lo = 0;
2940 ral_hi = 0;
2941 }
2942
2943 if (sc->sc_type >= WM_T_82544) {
2944 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2945 ral_lo);
2946 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2947 ral_hi);
2948 } else {
2949 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2950 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2951 }
2952 }
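/*
 * Worked example: for the address 00:a0:c9:12:34:56, ral_lo packs the
 * first four bytes low byte first, giving 0x12c9a000, and ral_hi
 * packs the remaining two plus the valid bit, giving
 * 0x5634 | RAL_AV.
 */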
2953
2954 /*
2955 * wm_mchash:
2956 *
2957 * Compute the hash of the multicast address for the 4096-bit
2958 * multicast filter.
2959 */
2960 static uint32_t
2961 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2962 {
2963 static const int lo_shift[4] = { 4, 3, 2, 0 };
2964 static const int hi_shift[4] = { 4, 5, 6, 8 };
2965 uint32_t hash;
2966
2967 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2968 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2969
2970 return (hash & 0xfff);
2971 }
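/*
 * Worked example: with offset type 0 (the type this driver uses), an
 * address ending in ...:34:56 hashes to
 * (0x34 >> 4) | (0x56 << 4) = 0x563; wm_set_filter() below then sets
 * bit (0x563 & 0x1f) = 3 in MTA word (0x563 >> 5) & 0x7f = 0x2b.
 */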
2972
2973 /*
2974 * wm_set_filter:
2975 *
2976 * Set up the receive filter.
2977 */
2978 static void
2979 wm_set_filter(struct wm_softc *sc)
2980 {
2981 struct ethercom *ec = &sc->sc_ethercom;
2982 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2983 struct ether_multi *enm;
2984 struct ether_multistep step;
2985 bus_addr_t mta_reg;
2986 uint32_t hash, reg, bit;
2987 int i;
2988
2989 if (sc->sc_type >= WM_T_82544)
2990 mta_reg = WMREG_CORDOVA_MTA;
2991 else
2992 mta_reg = WMREG_MTA;
2993
2994 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2995
2996 if (ifp->if_flags & IFF_BROADCAST)
2997 sc->sc_rctl |= RCTL_BAM;
2998 if (ifp->if_flags & IFF_PROMISC) {
2999 sc->sc_rctl |= RCTL_UPE;
3000 goto allmulti;
3001 }
3002
3003 /*
3004 * Set the station address in the first RAL slot, and
3005 * clear the remaining slots.
3006 */
3007 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
3008 for (i = 1; i < WM_RAL_TABSIZE; i++)
3009 wm_set_ral(sc, NULL, i);
3010
3011 /* Clear out the multicast table. */
3012 for (i = 0; i < WM_MC_TABSIZE; i++)
3013 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3014
3015 ETHER_FIRST_MULTI(step, ec, enm);
3016 while (enm != NULL) {
3017 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3018 /*
3019 * We must listen to a range of multicast addresses.
3020 * For now, just accept all multicasts, rather than
3021 * trying to set only those filter bits needed to match
3022 * the range. (At this time, the only use of address
3023 * ranges is for IP multicast routing, for which the
3024 * range is big enough to require all bits set.)
3025 */
3026 goto allmulti;
3027 }
3028
3029 hash = wm_mchash(sc, enm->enm_addrlo);
3030
3031 reg = (hash >> 5) & 0x7f;
3032 bit = hash & 0x1f;
3033
3034 hash = CSR_READ(sc, mta_reg + (reg << 2));
3035 hash |= 1U << bit;
3036
3037 /* XXX Hardware bug?? */
3038 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3039 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3040 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3041 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3042 } else
3043 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3044
3045 ETHER_NEXT_MULTI(step, enm);
3046 }
3047
3048 ifp->if_flags &= ~IFF_ALLMULTI;
3049 goto setit;
3050
3051 allmulti:
3052 ifp->if_flags |= IFF_ALLMULTI;
3053 sc->sc_rctl |= RCTL_MPE;
3054
3055 setit:
3056 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3057 }
3058
3059 /*
3060 * wm_tbi_mediainit:
3061 *
3062 * Initialize media for use on 1000BASE-X devices.
3063 */
3064 static void
3065 wm_tbi_mediainit(struct wm_softc *sc)
3066 {
3067 const char *sep = "";
3068
3069 if (sc->sc_type < WM_T_82543)
3070 sc->sc_tipg = TIPG_WM_DFLT;
3071 else
3072 sc->sc_tipg = TIPG_LG_DFLT;
3073
3074 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3075 wm_tbi_mediastatus);
3076
3077 /*
3078 * SWD Pins:
3079 *
3080 * 0 = Link LED (output)
3081 * 1 = Loss Of Signal (input)
3082 */
3083 sc->sc_ctrl |= CTRL_SWDPIO(0);
3084 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3085
3086 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3087
3088 #define ADD(ss, mm, dd) \
3089 do { \
3090 printf("%s%s", sep, ss); \
3091 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3092 sep = ", "; \
3093 } while (/*CONSTCOND*/0)
3094
3095 printf("%s: ", sc->sc_dev.dv_xname);
3096 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3097 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3098 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3099 printf("\n");
3100
3101 #undef ADD
3102
3103 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3104 }
3105
3106 /*
3107 * wm_tbi_mediastatus: [ifmedia interface function]
3108 *
3109 * Get the current interface media status on a 1000BASE-X device.
3110 */
3111 static void
3112 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3113 {
3114 struct wm_softc *sc = ifp->if_softc;
3115 uint32_t ctrl;
3116
3117 ifmr->ifm_status = IFM_AVALID;
3118 ifmr->ifm_active = IFM_ETHER;
3119
3120 if (sc->sc_tbi_linkup == 0) {
3121 ifmr->ifm_active |= IFM_NONE;
3122 return;
3123 }
3124
3125 ifmr->ifm_status |= IFM_ACTIVE;
3126 ifmr->ifm_active |= IFM_1000_SX;
3127 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3128 ifmr->ifm_active |= IFM_FDX;
3129 ctrl = CSR_READ(sc, WMREG_CTRL);
3130 if (ctrl & CTRL_RFCE)
3131 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3132 if (ctrl & CTRL_TFCE)
3133 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3134 }
3135
3136 /*
3137 * wm_tbi_mediachange: [ifmedia interface function]
3138 *
3139 * Set hardware to newly-selected media on a 1000BASE-X device.
3140 */
3141 static int
3142 wm_tbi_mediachange(struct ifnet *ifp)
3143 {
3144 struct wm_softc *sc = ifp->if_softc;
3145 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3146 uint32_t status;
3147 int i;
3148
3149 sc->sc_txcw = ife->ifm_data;
3150 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3151 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3152 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3153 sc->sc_txcw |= TXCW_ANE;
3154
3155 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3156 delay(10000);
3157
3158 /* NOTE: CTRL will update TFCE and RFCE automatically. */
3159
3160 sc->sc_tbi_anstate = 0;
3161
3162 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3163 /* Have signal; wait for the link to come up. */
3164 for (i = 0; i < 50; i++) {
3165 delay(10000);
3166 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3167 break;
3168 }
3169
3170 status = CSR_READ(sc, WMREG_STATUS);
3171 if (status & STATUS_LU) {
3172 /* Link is up. */
3173 DPRINTF(WM_DEBUG_LINK,
3174 ("%s: LINK: set media -> link up %s\n",
3175 sc->sc_dev.dv_xname,
3176 (status & STATUS_FD) ? "FDX" : "HDX"));
3177 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3178 sc->sc_fcrtl &= ~FCRTL_XONE;
3179 if (status & STATUS_FD)
3180 sc->sc_tctl |=
3181 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3182 else
3183 sc->sc_tctl |=
3184 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3185 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
3186 sc->sc_fcrtl |= FCRTL_XONE;
3187 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3188 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3189 WMREG_OLD_FCRTL : WMREG_FCRTL,
3190 sc->sc_fcrtl);
3191 sc->sc_tbi_linkup = 1;
3192 } else {
3193 /* Link is down. */
3194 DPRINTF(WM_DEBUG_LINK,
3195 ("%s: LINK: set media -> link down\n",
3196 sc->sc_dev.dv_xname));
3197 sc->sc_tbi_linkup = 0;
3198 }
3199 } else {
3200 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3201 sc->sc_dev.dv_xname));
3202 sc->sc_tbi_linkup = 0;
3203 }
3204
3205 wm_tbi_set_linkled(sc);
3206
3207 return (0);
3208 }
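/*
 * Timing note: when signal is present, the loop above gives
 * autonegotiation up to 50 * 10ms = 500ms for STATUS_LU to appear
 * before concluding that the link is down.
 */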
3209
3210 /*
3211 * wm_tbi_set_linkled:
3212 *
3213 * Update the link LED on 1000BASE-X devices.
3214 */
3215 static void
3216 wm_tbi_set_linkled(struct wm_softc *sc)
3217 {
3218
3219 if (sc->sc_tbi_linkup)
3220 sc->sc_ctrl |= CTRL_SWDPIN(0);
3221 else
3222 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3223
3224 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3225 }
3226
3227 /*
3228 * wm_tbi_check_link:
3229 *
3230 * Check the link on 1000BASE-X devices.
3231 */
3232 static void
3233 wm_tbi_check_link(struct wm_softc *sc)
3234 {
3235 uint32_t rxcw, ctrl, status;
3236
3237 if (sc->sc_tbi_anstate == 0)
3238 return;
3239 else if (sc->sc_tbi_anstate > 1) {
3240 DPRINTF(WM_DEBUG_LINK,
3241 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3242 sc->sc_tbi_anstate));
3243 sc->sc_tbi_anstate--;
3244 return;
3245 }
3246
3247 sc->sc_tbi_anstate = 0;
3248
3249 rxcw = CSR_READ(sc, WMREG_RXCW);
3250 ctrl = CSR_READ(sc, WMREG_CTRL);
3251 status = CSR_READ(sc, WMREG_STATUS);
3252
3253 if ((status & STATUS_LU) == 0) {
3254 DPRINTF(WM_DEBUG_LINK,
3255 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3256 sc->sc_tbi_linkup = 0;
3257 } else {
3258 DPRINTF(WM_DEBUG_LINK,
3259 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3260 (status & STATUS_FD) ? "FDX" : "HDX"));
3261 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3262 sc->sc_fcrtl &= ~FCRTL_XONE;
3263 if (status & STATUS_FD)
3264 sc->sc_tctl |=
3265 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3266 else
3267 sc->sc_tctl |=
3268 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3269 if (ctrl & CTRL_TFCE)
3270 sc->sc_fcrtl |= FCRTL_XONE;
3271 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3272 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3273 WMREG_OLD_FCRTL : WMREG_FCRTL,
3274 sc->sc_fcrtl);
3275 sc->sc_tbi_linkup = 1;
3276 }
3277
3278 wm_tbi_set_linkled(sc);
3279 }
3280
3281 /*
3282 * wm_gmii_reset:
3283 *
3284 * Reset the PHY.
3285 */
3286 static void
3287 wm_gmii_reset(struct wm_softc *sc)
3288 {
3289 uint32_t reg;
3290
3291 if (sc->sc_type >= WM_T_82544) {
3292 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3293 delay(20000);
3294
3295 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3296 delay(20000);
3297 } else {
3298 /* The PHY reset pin is active-low. */
3299 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3300 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3301 CTRL_EXT_SWDPIN(4));
3302 reg |= CTRL_EXT_SWDPIO(4);
3303
3304 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3305 delay(10);
3306
3307 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3308 delay(10);
3309
3310 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3311 delay(10);
3312 #if 0
3313 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3314 #endif
3315 }
3316 }
3317
3318 /*
3319 * wm_gmii_mediainit:
3320 *
3321 * Initialize media for use on 1000BASE-T devices.
3322 */
3323 static void
3324 wm_gmii_mediainit(struct wm_softc *sc)
3325 {
3326 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3327
3328 /* We have MII. */
3329 sc->sc_flags |= WM_F_HAS_MII;
3330
3331 sc->sc_tipg = TIPG_1000T_DFLT;
3332
3333 /*
3334 * Let the chip set speed/duplex on its own based on
3335 * signals from the PHY.
3336 */
3337 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3338 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3339
3340 /* Initialize our media structures and probe the GMII. */
3341 sc->sc_mii.mii_ifp = ifp;
3342
3343 if (sc->sc_type >= WM_T_82544) {
3344 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3345 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3346 } else {
3347 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3348 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3349 }
3350 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3351
3352 wm_gmii_reset(sc);
3353
3354 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3355 wm_gmii_mediastatus);
3356
3357 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3358 MII_OFFSET_ANY, MIIF_DOPAUSE);
3359 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3360 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3361 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3362 } else
3363 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3364 }
3365
3366 /*
3367 * wm_gmii_mediastatus: [ifmedia interface function]
3368 *
3369 * Get the current interface media status on a 1000BASE-T device.
3370 */
3371 static void
3372 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3373 {
3374 struct wm_softc *sc = ifp->if_softc;
3375
3376 mii_pollstat(&sc->sc_mii);
3377 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3378 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3379 sc->sc_flowflags;
3380 }
3381
3382 /*
3383 * wm_gmii_mediachange: [ifmedia interface function]
3384 *
3385 * Set hardware to newly-selected media on a 1000BASE-T device.
3386 */
3387 static int
3388 wm_gmii_mediachange(struct ifnet *ifp)
3389 {
3390 struct wm_softc *sc = ifp->if_softc;
3391
3392 if (ifp->if_flags & IFF_UP)
3393 mii_mediachg(&sc->sc_mii);
3394 return (0);
3395 }
3396
3397 #define MDI_IO CTRL_SWDPIN(2)
3398 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3399 #define MDI_CLK CTRL_SWDPIN(3)
3400
3401 static void
3402 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3403 {
3404 uint32_t i, v;
3405
3406 v = CSR_READ(sc, WMREG_CTRL);
3407 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3408 v |= MDI_DIR | CTRL_SWDPIO(3);
3409
3410 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3411 if (data & i)
3412 v |= MDI_IO;
3413 else
3414 v &= ~MDI_IO;
3415 CSR_WRITE(sc, WMREG_CTRL, v);
3416 delay(10);
3417 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3418 delay(10);
3419 CSR_WRITE(sc, WMREG_CTRL, v);
3420 delay(10);
3421 }
3422 }
3423
3424 static uint32_t
3425 i82543_mii_recvbits(struct wm_softc *sc)
3426 {
3427 uint32_t v, i, data = 0;
3428
3429 v = CSR_READ(sc, WMREG_CTRL);
3430 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3431 v |= CTRL_SWDPIO(3);
3432
3433 CSR_WRITE(sc, WMREG_CTRL, v);
3434 delay(10);
3435 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3436 delay(10);
3437 CSR_WRITE(sc, WMREG_CTRL, v);
3438 delay(10);
3439
3440 for (i = 0; i < 16; i++) {
3441 data <<= 1;
3442 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3443 delay(10);
3444 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3445 data |= 1;
3446 CSR_WRITE(sc, WMREG_CTRL, v);
3447 delay(10);
3448 }
3449
3450 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3451 delay(10);
3452 CSR_WRITE(sc, WMREG_CTRL, v);
3453 delay(10);
3454
3455 return (data);
3456 }
3457
3458 #undef MDI_IO
3459 #undef MDI_DIR
3460 #undef MDI_CLK
3461
3462 /*
3463 * wm_gmii_i82543_readreg: [mii interface function]
3464 *
3465 * Read a PHY register on the GMII (i82543 version).
3466 */
3467 static int
3468 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3469 {
3470 struct wm_softc *sc = (void *) self;
3471 int rv;
3472
3473 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3474 i82543_mii_sendbits(sc, reg | (phy << 5) |
3475 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3476 rv = i82543_mii_recvbits(sc) & 0xffff;
3477
3478 DPRINTF(WM_DEBUG_GMII,
3479 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3480 sc->sc_dev.dv_xname, phy, reg, rv));
3481
3482 return (rv);
3483 }
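/*
 * Added note on the frame built above: the 32 one-bits form the MDIO
 * preamble, and the 14 bits that follow are, MSB first,
 * <01 start><10 read opcode><5-bit phy><5-bit reg>; the turnaround
 * and 16 data bits are then clocked back in by
 * i82543_mii_recvbits().
 */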
3484
3485 /*
3486 * wm_gmii_i82543_writereg: [mii interface function]
3487 *
3488 * Write a PHY register on the GMII (i82543 version).
3489 */
3490 static void
3491 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3492 {
3493 struct wm_softc *sc = (void *) self;
3494
3495 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3496 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3497 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3498 (MII_COMMAND_START << 30), 32);
3499 }
3500
3501 /*
3502 * wm_gmii_i82544_readreg: [mii interface function]
3503 *
3504 * Read a PHY register on the GMII.
3505 */
3506 static int
3507 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3508 {
3509 struct wm_softc *sc = (void *) self;
3510 uint32_t mdic = 0;
3511 int i, rv;
3512
3513 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3514 MDIC_REGADD(reg));
3515
3516 for (i = 0; i < 100; i++) {
3517 mdic = CSR_READ(sc, WMREG_MDIC);
3518 if (mdic & MDIC_READY)
3519 break;
3520 delay(10);
3521 }
3522
3523 if ((mdic & MDIC_READY) == 0) {
3524 printf("%s: MDIC read timed out: phy %d reg %d\n",
3525 sc->sc_dev.dv_xname, phy, reg);
3526 rv = 0;
3527 } else if (mdic & MDIC_E) {
3528 #if 0 /* This is normal if no PHY is present. */
3529 printf("%s: MDIC read error: phy %d reg %d\n",
3530 sc->sc_dev.dv_xname, phy, reg);
3531 #endif
3532 rv = 0;
3533 } else {
3534 rv = MDIC_DATA(mdic);
3535 if (rv == 0xffff)
3536 rv = 0;
3537 }
3538
3539 return (rv);
3540 }
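/*
 * Added note: MDIC_READY is polled for up to 100 * 10us = 1ms.  On
 * the i82544 and later the MDI frame is generated by the MAC itself,
 * which is why no bit-banging is needed here.
 */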
3541
3542 /*
3543 * wm_gmii_i82544_writereg: [mii interface function]
3544 *
3545 * Write a PHY register on the GMII.
3546 */
3547 static void
3548 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3549 {
3550 struct wm_softc *sc = (void *) self;
3551 uint32_t mdic = 0;
3552 int i;
3553
3554 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3555 MDIC_REGADD(reg) | MDIC_DATA(val));
3556
3557 for (i = 0; i < 100; i++) {
3558 mdic = CSR_READ(sc, WMREG_MDIC);
3559 if (mdic & MDIC_READY)
3560 break;
3561 delay(10);
3562 }
3563
3564 if ((mdic & MDIC_READY) == 0)
3565 printf("%s: MDIC write timed out: phy %d reg %d\n",
3566 sc->sc_dev.dv_xname, phy, reg);
3567 else if (mdic & MDIC_E)
3568 printf("%s: MDIC write error: phy %d reg %d\n",
3569 sc->sc_dev.dv_xname, phy, reg);
3570 }
3571
3572 /*
3573 * wm_gmii_statchg: [mii interface function]
3574 *
3575 * Callback from MII layer when media changes.
3576 */
3577 static void
3578 wm_gmii_statchg(struct device *self)
3579 {
3580 struct wm_softc *sc = (void *) self;
3581 struct mii_data *mii = &sc->sc_mii;
3582
3583 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
3584 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3585 sc->sc_fcrtl &= ~FCRTL_XONE;
3586
3587 /*
3588 * Get flow control negotiation result.
3589 */
3590 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3591 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3592 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3593 mii->mii_media_active &= ~IFM_ETH_FMASK;
3594 }
3595
3596 if (sc->sc_flowflags & IFM_FLOW) {
3597 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
3598 sc->sc_ctrl |= CTRL_TFCE;
3599 sc->sc_fcrtl |= FCRTL_XONE;
3600 }
3601 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3602 sc->sc_ctrl |= CTRL_RFCE;
3603 }
3604
3605 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3606 DPRINTF(WM_DEBUG_LINK,
3607 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3608 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3609 } else {
3610 DPRINTF(WM_DEBUG_LINK,
3611 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3612 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3613 }
3614
3615 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3616 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3617 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
3618 : WMREG_FCRTL, sc->sc_fcrtl);
3619 }
3620