/*	$NetBSD: if_wm.c,v 1.73 2004/07/13 07:29:37 tron Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.73 2004/07/13 07:29:37 tron Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 40 DMA segments per
 * packet (there have been reports of jumbo frame packets with as
 * many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
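
/*
 * Illustrative sketch (not compiled): because WM_NTXDESC and WM_NRXDESC
 * are powers of two, the WM_NEXT*()/WM_PREVRX() macros can advance a
 * ring index with a cheap AND-mask instead of a modulo or a branch.
 * The function below only demonstrates the wrap-around behavior; "idx"
 * is a hypothetical local, not part of the driver.
 */
#if 0
static void
wm_ring_index_demo(void)
{
	int idx = WM_NTXDESC - 2;	/* near the end of the ring */

	idx = WM_NEXTTX(idx);		/* 254 -> 255 */
	idx = WM_NEXTTX(idx);		/* 255 -> 0: wraps via the mask */
}
#endif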

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown	= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx unsupported MAC control frames */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
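
/*
 * Illustrative sketch (not compiled): the tail-pointer idiom above
 * appends an mbuf to the receive chain in O(1) without walking the
 * list.  sc_rxtailp always points at the m_next slot of the last mbuf
 * (or at sc_rxhead when the chain is empty), so linking never needs a
 * head/non-head special case.  "sc" and "m" are assumed to be a live
 * softc and a freshly received mbuf.
 */
#if 0
static void
wm_rxchain_demo(struct wm_softc *sc, struct mbuf *m)
{

	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL, len == 0 */
	WM_RXCHAIN_LINK(sc, m);		/* m becomes both head and tail */
	sc->sc_rxlen += m->m_len;	/* caller accounts for the length */
}
#endif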

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */
#define	WM_F_CSA		0x80	/* bus is CSA */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
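
/*
 * Illustrative sketch (not compiled): WM_CDTXSYNC() takes a starting
 * descriptor and a count, and issues two bus_dmamap_sync() calls when
 * the range wraps past the end of the ring.  For example, syncing 4
 * descriptors starting at index 254 in the 256-entry ring covers
 * [254, 255] and then [0, 1].  The index and count here are
 * hypothetical.
 */
#if 0
static void
wm_cdtxsync_demo(struct wm_softc *sc)
{

	/* Wraps: syncs descriptors 254-255, then 0-1. */
	WM_CDTXSYNC(sc, 254, 4, BUS_DMASYNC_PREWRITE);
}
#endif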
394
395 #define WM_INIT_RXDESC(sc, x) \
396 do { \
397 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
398 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
399 struct mbuf *__m = __rxs->rxs_mbuf; \
400 \
401 /* \
402 * Note: We scoot the packet forward 2 bytes in the buffer \
403 * so that the payload after the Ethernet header is aligned \
404 * to a 4-byte boundary. \
405 * \
406 * XXX BRAINDAMAGE ALERT! \
407 * The stupid chip uses the same size for every buffer, which \
408 * is set in the Receive Control register. We are using the 2K \
409 * size option, but what we REALLY want is (2K - 2)! For this \
410 * reason, we can't "scoot" packets longer than the standard \
411 * Ethernet MTU. On strict-alignment platforms, if the total \
412 * size exceeds (2K - 2) we set align_tweak to 0 and let \
413 * the upper layer copy the headers. \
414 */ \
415 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
416 \
417 wm_set_dma_addr(&__rxd->wrx_addr, \
418 __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
419 __rxd->wrx_len = 0; \
420 __rxd->wrx_cksum = 0; \
421 __rxd->wrx_status = 0; \
422 __rxd->wrx_errors = 0; \
423 __rxd->wrx_special = 0; \
424 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
425 \
426 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
427 } while (/*CONSTCOND*/0)
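
/*
 * Why a 2-byte tweak: the Ethernet header is 14 bytes, so starting the
 * frame 2 bytes into the buffer places the IP header at offset 16, a
 * 4-byte boundary, which strict-alignment CPUs require for word access
 * to the header fields.
 */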

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
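
/*
 * Illustrative sketch (not compiled): the i82544 and later expose an
 * indirect register window through two 32-bit I/O ports, an address
 * port at offset 0 and a data port at offset 4; wm_io_read() and
 * wm_io_write() above implement the two halves of that protocol.
 * The register written below is chosen only for illustration.
 */
#if 0
static void
wm_io_demo(struct wm_softc *sc)
{

	if (sc->sc_flags & WM_F_IOH_VALID)
		wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);
}
#endif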

static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
		    0, &sc->sc_iot, &sc->sc_ioh,
		    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data),
	    PAGE_SIZE, (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data),
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0,
	    &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data,
	    sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
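
	/*
	 * The word count reported below follows directly from the
	 * address width: a MicroWire part with 6 address bits holds
	 * 1 << 6 = 64 16-bit words, one with 8 bits holds 256, and an
	 * SPI part with 16 address bits can hold up to 65536.
	 */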
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
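	/*
	 * The EEPROM stores the address as three little-endian 16-bit
	 * words, with the earlier octet in the low byte of each word,
	 * so each word is split low byte first below.
	 */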
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}
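
/*
 * Worked example (for illustration only): for an untagged IPv4/TCP
 * frame with a 20-byte IP header, offset starts at ETHER_HDR_LEN (14),
 * so the context fields come out as IPCSS = 14, IPCSO = 14 +
 * offsetof(struct ip, ip_sum) = 24, and IPCSE = 14 + 20 - 1 = 33;
 * then TUCSS = 34, TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50,
 * with TUCSE = 0 meaning "checksum to the end of the packet".
 */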

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			wm_set_dma_addr(&sc->sc_txdescs[nexttx].wtx_addr,
			    dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
			    cksumfields;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (u_int)le32toh(dmamap->dm_segs[seg].ds_addr),
			    (u_int)le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0)
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				/* We can do both TXPAUSE and RXPAUSE. */
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
		}
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];
1962
1963 DPRINTF(WM_DEBUG_RX,
1964 ("%s: RX: checking descriptor %d\n",
1965 sc->sc_dev.dv_xname, i));
1966
1967 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1968
1969 status = sc->sc_rxdescs[i].wrx_status;
1970 errors = sc->sc_rxdescs[i].wrx_errors;
1971 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1972
1973 if ((status & WRX_ST_DD) == 0) {
1974 /*
1975 * We have processed all of the receive descriptors.
1976 */
1977 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1978 break;
1979 }
1980
1981 if (__predict_false(sc->sc_rxdiscard)) {
1982 DPRINTF(WM_DEBUG_RX,
1983 ("%s: RX: discarding contents of descriptor %d\n",
1984 sc->sc_dev.dv_xname, i));
1985 WM_INIT_RXDESC(sc, i);
1986 if (status & WRX_ST_EOP) {
1987 /* Reset our state. */
1988 DPRINTF(WM_DEBUG_RX,
1989 ("%s: RX: resetting rxdiscard -> 0\n",
1990 sc->sc_dev.dv_xname));
1991 sc->sc_rxdiscard = 0;
1992 }
1993 continue;
1994 }
1995
1996 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1997 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1998
1999 m = rxs->rxs_mbuf;
2000
2001 /*
2002 * Add a new receive buffer to the ring.
2003 */
2004 if (wm_add_rxbuf(sc, i) != 0) {
2005 /*
2006 * Failed, throw away what we've done so
2007 * far, and discard the rest of the packet.
2008 */
2009 ifp->if_ierrors++;
2010 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2011 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2012 WM_INIT_RXDESC(sc, i);
2013 if ((status & WRX_ST_EOP) == 0)
2014 sc->sc_rxdiscard = 1;
2015 if (sc->sc_rxhead != NULL)
2016 m_freem(sc->sc_rxhead);
2017 WM_RXCHAIN_RESET(sc);
2018 DPRINTF(WM_DEBUG_RX,
2019 ("%s: RX: Rx buffer allocation failed, "
2020 "dropping packet%s\n", sc->sc_dev.dv_xname,
2021 sc->sc_rxdiscard ? " (discard)" : ""));
2022 continue;
2023 }
2024
2025 WM_RXCHAIN_LINK(sc, m);
2026
2027 m->m_len = len;
2028
2029 DPRINTF(WM_DEBUG_RX,
2030 ("%s: RX: buffer at %p len %d\n",
2031 sc->sc_dev.dv_xname, m->m_data, len));
2032
2033 /*
2034 * If this is not the end of the packet, keep
2035 * looking.
2036 */
2037 if ((status & WRX_ST_EOP) == 0) {
2038 sc->sc_rxlen += len;
2039 DPRINTF(WM_DEBUG_RX,
2040 ("%s: RX: not yet EOP, rxlen -> %d\n",
2041 sc->sc_dev.dv_xname, sc->sc_rxlen));
2042 continue;
2043 }
2044
2045 /*
2046 * Okay, we have the entire packet now...
2047 */
2048 *sc->sc_rxtailp = NULL;
2049 m = sc->sc_rxhead;
2050 len += sc->sc_rxlen;
2051
2052 WM_RXCHAIN_RESET(sc);
2053
2054 DPRINTF(WM_DEBUG_RX,
2055 ("%s: RX: have entire packet, len -> %d\n",
2056 sc->sc_dev.dv_xname, len));
2057
2058 /*
2059 * If an error occurred, update stats and drop the packet.
2060 */
2061 if (errors &
2062 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2063 ifp->if_ierrors++;
2064 if (errors & WRX_ER_SE)
2065 printf("%s: symbol error\n",
2066 sc->sc_dev.dv_xname);
2067 else if (errors & WRX_ER_SEQ)
2068 printf("%s: receive sequence error\n",
2069 sc->sc_dev.dv_xname);
2070 else if (errors & WRX_ER_CE)
2071 printf("%s: CRC error\n",
2072 sc->sc_dev.dv_xname);
2073 m_freem(m);
2074 continue;
2075 }
2076
2077 /*
2078 * No errors. Receive the packet.
2079 *
2080 * Note, we have configured the chip to include the
2081 * CRC with every packet.
2082 */
2083 m->m_flags |= M_HASFCS;
2084 m->m_pkthdr.rcvif = ifp;
2085 m->m_pkthdr.len = len;
2086
2087 #if 0 /* XXXJRT */
2088 /*
2089 * If VLANs are enabled, VLAN packets have been unwrapped
2090 * for us. Associate the tag with the packet.
2091 */
2092 if (sc->sc_ethercom.ec_nvlans != 0 &&
2093 (status & WRX_ST_VP) != 0) {
2094 struct m_tag *vtag;
2095
2096 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2097 M_NOWAIT);
2098 if (vtag == NULL) {
2099 ifp->if_ierrors++;
2100 printf("%s: unable to allocate VLAN tag\n",
2101 sc->sc_dev.dv_xname);
2102 m_freem(m);
2103 continue;
2104 }
2105
2106 *(u_int *)(vtag + 1) =
2107 le16toh(sc->sc_rxdescs[i].wrx_special);
2108 }
2109 #endif /* XXXJRT */
2110
2111 /*
2112 * Set up checksum info for this packet.
2113 */
2114 if (status & WRX_ST_IPCS) {
2115 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2116 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2117 if (errors & WRX_ER_IPE)
2118 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2119 }
2120 if (status & WRX_ST_TCPCS) {
2121 /*
2122 * Note: we don't know if this was TCP or UDP,
2123 * so we just set both bits, and expect the
2124 * upper layers to deal.
2125 */
2126 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2127 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2128 if (errors & WRX_ER_TCPE)
2129 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2130 }
2131
2132 ifp->if_ipackets++;
2133
2134 #if NBPFILTER > 0
2135 /* Pass this up to any BPF listeners. */
2136 if (ifp->if_bpf)
2137 bpf_mtap(ifp->if_bpf, m);
2138 #endif /* NBPFILTER > 0 */
2139
2140 /* Pass it on. */
2141 (*ifp->if_input)(ifp, m);
2142 }
2143
2144 /* Update the receive pointer. */
2145 sc->sc_rxptr = i;
2146
2147 DPRINTF(WM_DEBUG_RX,
2148 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2149 }
2150
2151 /*
2152 * wm_linkintr:
2153 *
2154 * Helper; handle link interrupts.
2155 */
2156 static void
2157 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2158 {
2159 uint32_t status;
2160
2161 /*
2162 * If we get a link status interrupt on a 1000BASE-T
2163 * device, just fall into the normal MII tick path.
2164 */
2165 if (sc->sc_flags & WM_F_HAS_MII) {
2166 if (icr & ICR_LSC) {
2167 DPRINTF(WM_DEBUG_LINK,
2168 ("%s: LINK: LSC -> mii_tick\n",
2169 sc->sc_dev.dv_xname));
2170 mii_tick(&sc->sc_mii);
2171 } else if (icr & ICR_RXSEQ) {
2172 DPRINTF(WM_DEBUG_LINK,
2173 ("%s: LINK Receive sequence error\n",
2174 sc->sc_dev.dv_xname));
2175 }
2176 return;
2177 }
2178
2179 /*
2180 * If we are now receiving /C/, check for link again in
2181 * a couple of link clock ticks.
2182 */
2183 if (icr & ICR_RXCFG) {
2184 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2185 sc->sc_dev.dv_xname));
2186 sc->sc_tbi_anstate = 2;
2187 }
2188
2189 if (icr & ICR_LSC) {
2190 status = CSR_READ(sc, WMREG_STATUS);
2191 if (status & STATUS_LU) {
2192 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2193 sc->sc_dev.dv_xname,
2194 (status & STATUS_FD) ? "FDX" : "HDX"));
2195 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2196 sc->sc_fcrtl &= ~FCRTL_XONE;
2197 if (status & STATUS_FD)
2198 sc->sc_tctl |=
2199 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2200 else
2201 sc->sc_tctl |=
2202 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2203 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2204 sc->sc_fcrtl |= FCRTL_XONE;
2205 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2206 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2207 WMREG_OLD_FCRTL : WMREG_FCRTL,
2208 sc->sc_fcrtl);
2209 sc->sc_tbi_linkup = 1;
2210 } else {
2211 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2212 sc->sc_dev.dv_xname));
2213 sc->sc_tbi_linkup = 0;
2214 }
2215 sc->sc_tbi_anstate = 2;
2216 wm_tbi_set_linkled(sc);
2217 } else if (icr & ICR_RXSEQ) {
2218 DPRINTF(WM_DEBUG_LINK,
2219 ("%s: LINK: Receive sequence error\n",
2220 sc->sc_dev.dv_xname));
2221 }
2222 }
2223
2224 /*
2225 * wm_tick:
2226 *
2227 * One second timer, used to check link status, sweep up
2228 * completed transmit jobs, etc.
2229 */
2230 static void
2231 wm_tick(void *arg)
2232 {
2233 struct wm_softc *sc = arg;
2234 int s;
2235
2236 s = splnet();
2237
2238 if (sc->sc_type >= WM_T_82542_2_1) {
2239 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2240 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2241 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2242 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2243 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2244 }
2245
2246 if (sc->sc_flags & WM_F_HAS_MII)
2247 mii_tick(&sc->sc_mii);
2248 else
2249 wm_tbi_check_link(sc);
2250
2251 splx(s);
2252
2253 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2254 }
2255
2256 /*
2257 * wm_reset:
2258 *
2259 * Reset the i82542 chip.
2260 */
2261 static void
2262 wm_reset(struct wm_softc *sc)
2263 {
2264 int i;
2265
2266 switch (sc->sc_type) {
2267 case WM_T_82544:
2268 case WM_T_82540:
2269 case WM_T_82545:
2270 case WM_T_82546:
2271 case WM_T_82541:
2272 case WM_T_82541_2:
2273 /*
2274 * These chips have a problem with the memory-mapped
2275 * write cycle when issuing the reset, so use I/O-mapped
2276 * access, if possible.
2277 */
2278 if (sc->sc_flags & WM_F_IOH_VALID)
2279 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2280 else
2281 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2282 break;
2283
2284 case WM_T_82545_3:
2285 case WM_T_82546_3:
2286 /* Use the shadow control register on these chips. */
2287 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2288 break;
2289
2290 default:
2291 /* Everything else can safely use the documented method. */
2292 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2293 break;
2294 }
2295 delay(10000);
2296
2297 for (i = 0; i < 1000; i++) {
2298 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2299 return;
2300 delay(20);
2301 }
2302
2303 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2304 printf("%s: WARNING: reset failed to complete\n",
2305 sc->sc_dev.dv_xname);
2306 }
2307
2308 /*
2309 * wm_init: [ifnet interface function]
2310 *
2311 * Initialize the interface. Must be called at splnet().
2312 */
2313 static int
2314 wm_init(struct ifnet *ifp)
2315 {
2316 struct wm_softc *sc = ifp->if_softc;
2317 struct wm_rxsoft *rxs;
2318 int i, error = 0;
2319 uint32_t reg;
2320
2321 /*
2322 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2323 	 * There is a small but measurable benefit to avoiding the adjustment
2324 * of the descriptor so that the headers are aligned, for normal mtu,
2325 * on such platforms. One possibility is that the DMA itself is
2326 * slightly more efficient if the front of the entire packet (instead
2327 * of the front of the headers) is aligned.
2328 *
2329 	 * (The 2-byte tweak offsets the buffer so that the 14-byte Ethernet
2330 	 * header leaves the IP header 4-byte aligned.)  Note we must always
2331 	 * set align_tweak to 0 if we are using jumbo frames.
	 */
2332 #ifdef __NO_STRICT_ALIGNMENT
2333 sc->sc_align_tweak = 0;
2334 #else
2335 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2336 sc->sc_align_tweak = 0;
2337 else
2338 sc->sc_align_tweak = 2;
2339 #endif /* __NO_STRICT_ALIGNMENT */
2340
2341 /* Cancel any pending I/O. */
2342 wm_stop(ifp, 0);
2343
2344 /* Reset the chip to a known state. */
2345 wm_reset(sc);
2346
2347 /* Initialize the transmit descriptor ring. */
2348 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2349 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2350 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2351 sc->sc_txfree = WM_NTXDESC;
2352 sc->sc_txnext = 0;
2353
2354 sc->sc_txctx_ipcs = 0xffffffff;
2355 sc->sc_txctx_tucs = 0xffffffff;
2356
2357 if (sc->sc_type < WM_T_82543) {
2358 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2359 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2360 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2361 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2362 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2363 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2364 } else {
2365 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2366 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2367 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2368 CSR_WRITE(sc, WMREG_TDH, 0);
2369 CSR_WRITE(sc, WMREG_TDT, 0);
2370 CSR_WRITE(sc, WMREG_TIDV, 128);
2371
2372 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2373 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2374 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2375 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2376 }
2377 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2378 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2379
2380 /* Initialize the transmit job descriptors. */
2381 for (i = 0; i < WM_TXQUEUELEN; i++)
2382 sc->sc_txsoft[i].txs_mbuf = NULL;
2383 sc->sc_txsfree = WM_TXQUEUELEN;
2384 sc->sc_txsnext = 0;
2385 sc->sc_txsdirty = 0;
2386
2387 /*
2388 * Initialize the receive descriptor and receive job
2389 * descriptor rings.
2390 */
2391 if (sc->sc_type < WM_T_82543) {
2392 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
2393 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
2394 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2395 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2396 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2397 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2398
2399 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2400 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2401 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2402 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2403 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2404 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2405 } else {
2406 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
2407 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
2408 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2409 CSR_WRITE(sc, WMREG_RDH, 0);
2410 CSR_WRITE(sc, WMREG_RDT, 0);
2411 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2412 }
2413 for (i = 0; i < WM_NRXDESC; i++) {
2414 rxs = &sc->sc_rxsoft[i];
2415 if (rxs->rxs_mbuf == NULL) {
2416 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2417 printf("%s: unable to allocate or map rx "
2418 "buffer %d, error = %d\n",
2419 sc->sc_dev.dv_xname, i, error);
2420 /*
2421 * XXX Should attempt to run with fewer receive
2422 * XXX buffers instead of just failing.
2423 */
2424 wm_rxdrain(sc);
2425 goto out;
2426 }
2427 } else
2428 WM_INIT_RXDESC(sc, i);
2429 }
2430 sc->sc_rxptr = 0;
2431 sc->sc_rxdiscard = 0;
2432 WM_RXCHAIN_RESET(sc);
2433
2434 /*
2435 * Clear out the VLAN table -- we don't use it (yet).
2436 */
2437 CSR_WRITE(sc, WMREG_VET, 0);
2438 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2439 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2440
2441 /*
2442 * Set up flow-control parameters.
2443 *
2444 * XXX Values could probably stand some tuning.
2445 */
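	/*
	 * (FCAL/FCAH hold the reserved PAUSE multicast address
	 * 01:80:c2:00:00:01, and FCT the 802.3x MAC control Ethertype
	 * 0x8808, so the chip can recognize incoming PAUSE frames.)
	 */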
2446 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2447 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2448 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2449
2450 sc->sc_fcrtl = FCRTL_DFLT;
2451 if (sc->sc_type < WM_T_82543) {
2452 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2453 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2454 } else {
2455 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2456 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2457 }
2458 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2459
2460 #if 0 /* XXXJRT */
2461 /* Deal with VLAN enables. */
2462 if (sc->sc_ethercom.ec_nvlans != 0)
2463 sc->sc_ctrl |= CTRL_VME;
2464 else
2465 #endif /* XXXJRT */
2466 sc->sc_ctrl &= ~CTRL_VME;
2467
2468 /* Write the control registers. */
2469 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2470 #if 0
2471 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2472 #endif
2473
2474 /*
2475 * Set up checksum offload parameters.
2476 */
2477 reg = CSR_READ(sc, WMREG_RXCSUM);
2478 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2479 reg |= RXCSUM_IPOFL;
2480 else
2481 reg &= ~RXCSUM_IPOFL;
2482 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2483 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2484 else {
2485 reg &= ~RXCSUM_TUOFL;
2486 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2487 reg &= ~RXCSUM_IPOFL;
2488 }
2489 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2490
2491 /*
2492 * Set up the interrupt registers.
2493 */
2494 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2495 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2496 ICR_RXO | ICR_RXT0;
2497 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2498 sc->sc_icr |= ICR_RXCFG;
2499 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2500
2501 /* Set up the inter-packet gap. */
2502 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2503
2504 #if 0 /* XXXJRT */
2505 /* Set the VLAN ethernetype. */
2506 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2507 #endif
2508
2509 /*
2510 * Set up the transmit control register; we start out with
2511 	 * a collision distance suitable for FDX, but update it when
2512 * we resolve the media type.
2513 */
2514 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2515 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2516 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2517
2518 /* Set the media. */
2519 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2520
2521 /*
2522 * Set up the receive control register; we actually program
2523 * the register when we set the receive filter. Use multicast
2524 * address offset type 0.
2525 *
2526 * Only the i82544 has the ability to strip the incoming
2527 * CRC, so we don't enable that feature.
2528 */
2529 sc->sc_mchash_type = 0;
2530 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2531 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2532
2533 	if (MCLBYTES == 2048) {
2534 		sc->sc_rctl |= RCTL_2k;
2535 	} else {
2536 		if (sc->sc_type >= WM_T_82543) {
2537 			switch (MCLBYTES) {
2538 case 4096:
2539 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2540 break;
2541 case 8192:
2542 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2543 break;
2544 case 16384:
2545 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2546 break;
2547 default:
2548 panic("wm_init: MCLBYTES %d unsupported",
2549 MCLBYTES);
2550 break;
2551 }
2552 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2553 }
2554
2555 /* Set the receive filter. */
2556 wm_set_filter(sc);
2557
2558 /* Start the one second link check clock. */
2559 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2560
2561 /* ...all done! */
2562 ifp->if_flags |= IFF_RUNNING;
2563 ifp->if_flags &= ~IFF_OACTIVE;
2564
2565 out:
2566 if (error)
2567 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2568 return (error);
2569 }
2570
2571 /*
2572 * wm_rxdrain:
2573 *
2574 * Drain the receive queue.
2575 */
2576 static void
2577 wm_rxdrain(struct wm_softc *sc)
2578 {
2579 struct wm_rxsoft *rxs;
2580 int i;
2581
2582 for (i = 0; i < WM_NRXDESC; i++) {
2583 rxs = &sc->sc_rxsoft[i];
2584 if (rxs->rxs_mbuf != NULL) {
2585 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2586 m_freem(rxs->rxs_mbuf);
2587 rxs->rxs_mbuf = NULL;
2588 }
2589 }
2590 }
2591
2592 /*
2593 * wm_stop: [ifnet interface function]
2594 *
2595 * Stop transmission on the interface.
2596 */
2597 static void
2598 wm_stop(struct ifnet *ifp, int disable)
2599 {
2600 struct wm_softc *sc = ifp->if_softc;
2601 struct wm_txsoft *txs;
2602 int i;
2603
2604 /* Stop the one second clock. */
2605 callout_stop(&sc->sc_tick_ch);
2606
2607 if (sc->sc_flags & WM_F_HAS_MII) {
2608 /* Down the MII. */
2609 mii_down(&sc->sc_mii);
2610 }
2611
2612 /* Stop the transmit and receive processes. */
2613 CSR_WRITE(sc, WMREG_TCTL, 0);
2614 CSR_WRITE(sc, WMREG_RCTL, 0);
2615
2616 /* Release any queued transmit buffers. */
2617 for (i = 0; i < WM_TXQUEUELEN; i++) {
2618 txs = &sc->sc_txsoft[i];
2619 if (txs->txs_mbuf != NULL) {
2620 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2621 m_freem(txs->txs_mbuf);
2622 txs->txs_mbuf = NULL;
2623 }
2624 }
2625
2626 if (disable)
2627 wm_rxdrain(sc);
2628
2629 /* Mark the interface as down and cancel the watchdog timer. */
2630 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2631 ifp->if_timer = 0;
2632 }
2633
2634 /*
2635 * wm_acquire_eeprom:
2636 *
2637 * Perform the EEPROM handshake required on some chips.
2638 */
2639 static int
2640 wm_acquire_eeprom(struct wm_softc *sc)
2641 {
2642 uint32_t reg;
2643 int x;
2644
2645 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2646 reg = CSR_READ(sc, WMREG_EECD);
2647
2648 /* Request EEPROM access. */
2649 reg |= EECD_EE_REQ;
2650 CSR_WRITE(sc, WMREG_EECD, reg);
2651
2652 		/* ...and wait for it to be granted. */
2653 for (x = 0; x < 100; x++) {
2654 reg = CSR_READ(sc, WMREG_EECD);
2655 if (reg & EECD_EE_GNT)
2656 break;
2657 delay(5);
2658 }
2659 if ((reg & EECD_EE_GNT) == 0) {
2660 aprint_error("%s: could not acquire EEPROM GNT\n",
2661 sc->sc_dev.dv_xname);
2662 reg &= ~EECD_EE_REQ;
2663 CSR_WRITE(sc, WMREG_EECD, reg);
2664 return (1);
2665 }
2666 }
2667
2668 return (0);
2669 }
2670
2671 /*
2672 * wm_release_eeprom:
2673 *
2674 * Release the EEPROM mutex.
2675 */
2676 static void
2677 wm_release_eeprom(struct wm_softc *sc)
2678 {
2679 uint32_t reg;
2680
2681 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2682 reg = CSR_READ(sc, WMREG_EECD);
2683 reg &= ~EECD_EE_REQ;
2684 CSR_WRITE(sc, WMREG_EECD, reg);
2685 }
2686 }
2687
2688 /*
2689 * wm_eeprom_sendbits:
2690 *
2691 * Send a series of bits to the EEPROM.
2692 */
2693 static void
2694 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2695 {
2696 uint32_t reg;
2697 int x;
2698
2699 reg = CSR_READ(sc, WMREG_EECD);
2700
2701 for (x = nbits; x > 0; x--) {
2702 if (bits & (1U << (x - 1)))
2703 reg |= EECD_DI;
2704 else
2705 reg &= ~EECD_DI;
2706 CSR_WRITE(sc, WMREG_EECD, reg);
2707 delay(2);
2708 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2709 delay(2);
2710 CSR_WRITE(sc, WMREG_EECD, reg);
2711 delay(2);
2712 }
2713 }
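
/*
 * Timing note (informational): each bit above costs three EECD writes
 * with a 2 usec settle apiece (data setup, SK high, SK low), i.e.
 * roughly 6 usec per bit, or about a 166 kHz serial clock -- well
 * below the maximum clock rate of the serial EEPROMs these chips use.
 */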
2714
2715 /*
2716 * wm_eeprom_recvbits:
2717 *
2718 * Receive a series of bits from the EEPROM.
2719 */
2720 static void
2721 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2722 {
2723 uint32_t reg, val;
2724 int x;
2725
2726 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2727
2728 val = 0;
2729 for (x = nbits; x > 0; x--) {
2730 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2731 delay(2);
2732 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2733 val |= (1U << (x - 1));
2734 CSR_WRITE(sc, WMREG_EECD, reg);
2735 delay(2);
2736 }
2737 *valp = val;
2738 }
2739
2740 /*
2741 * wm_read_eeprom_uwire:
2742 *
2743 * Read a word from the EEPROM using the MicroWire protocol.
2744 */
2745 static int
2746 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2747 {
2748 uint32_t reg, val;
2749 int i;
2750
2751 for (i = 0; i < wordcnt; i++) {
2752 /* Clear SK and DI. */
2753 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2754 CSR_WRITE(sc, WMREG_EECD, reg);
2755
2756 /* Set CHIP SELECT. */
2757 reg |= EECD_CS;
2758 CSR_WRITE(sc, WMREG_EECD, reg);
2759 delay(2);
2760
2761 /* Shift in the READ command. */
2762 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2763
2764 /* Shift in address. */
2765 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2766
2767 /* Shift out the data. */
2768 wm_eeprom_recvbits(sc, &val, 16);
2769 data[i] = val & 0xffff;
2770
2771 /* Clear CHIP SELECT. */
2772 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2773 CSR_WRITE(sc, WMREG_EECD, reg);
2774 delay(2);
2775 }
2776
2777 return (0);
2778 }
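
/*
 * For reference, a single MicroWire read as performed above, assuming
 * a 93C46-style part with 6 address bits (a sketch of the wire
 * protocol, not additional driver code):
 *
 *	raise CS
 *	shift out the 3-bit READ opcode (start bit plus "10"), MSB first
 *	shift out the 6-bit word address, MSB first
 *	shift in 16 data bits, MSB first
 *	drop CS
 */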
2779
2780 /*
2781 * wm_spi_eeprom_ready:
2782 *
2783 * Wait for a SPI EEPROM to be ready for commands.
2784 */
2785 static int
2786 wm_spi_eeprom_ready(struct wm_softc *sc)
2787 {
2788 uint32_t val;
2789 int usec;
2790
2791 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2792 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2793 wm_eeprom_recvbits(sc, &val, 8);
2794 if ((val & SPI_SR_RDY) == 0)
2795 break;
2796 }
2797 if (usec >= SPI_MAX_RETRIES) {
2798 aprint_error("%s: EEPROM failed to become ready\n",
2799 sc->sc_dev.dv_xname);
2800 return (1);
2801 }
2802 return (0);
2803 }
2804
2805 /*
2806 * wm_read_eeprom_spi:
2807 *
2808 	 * Read a word from the EEPROM using the SPI protocol.
2809 */
2810 static int
2811 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2812 {
2813 uint32_t reg, val;
2814 int i;
2815 uint8_t opc;
2816
2817 /* Clear SK and CS. */
2818 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2819 CSR_WRITE(sc, WMREG_EECD, reg);
2820 delay(2);
2821
2822 if (wm_spi_eeprom_ready(sc))
2823 return (1);
2824
2825 /* Toggle CS to flush commands. */
2826 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2827 delay(2);
2828 CSR_WRITE(sc, WMREG_EECD, reg);
2829 delay(2);
2830
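	/*
	 * SPI serial EEPROMs are byte-addressed, so the word index is
	 * converted to a byte address below (word << 1).  On parts with
	 * only 8 address bits, the ninth address bit travels in the
	 * opcode (SPI_OPC_A8), which is why words at offset 128 and up
	 * set it.
	 */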
2831 opc = SPI_OPC_READ;
2832 if (sc->sc_ee_addrbits == 8 && word >= 128)
2833 opc |= SPI_OPC_A8;
2834
2835 wm_eeprom_sendbits(sc, opc, 8);
2836 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2837
2838 for (i = 0; i < wordcnt; i++) {
2839 wm_eeprom_recvbits(sc, &val, 16);
2840 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2841 }
2842
2843 /* Raise CS and clear SK. */
2844 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2845 CSR_WRITE(sc, WMREG_EECD, reg);
2846 delay(2);
2847
2848 return (0);
2849 }
2850
2851 /*
2852 * wm_read_eeprom:
2853 *
2854 * Read data from the serial EEPROM.
2855 */
2856 static int
2857 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2858 {
2859 int rv;
2860
2861 if (wm_acquire_eeprom(sc))
2862 return (1);
2863
2864 if (sc->sc_flags & WM_F_EEPROM_SPI)
2865 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2866 else
2867 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2868
2869 wm_release_eeprom(sc);
2870 return (rv);
2871 }
2872
2873 /*
2874 * wm_add_rxbuf:
2875 *
2876 	 * Add a receive buffer to the indicated descriptor.
2877 */
2878 static int
2879 wm_add_rxbuf(struct wm_softc *sc, int idx)
2880 {
2881 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2882 struct mbuf *m;
2883 int error;
2884
2885 MGETHDR(m, M_DONTWAIT, MT_DATA);
2886 if (m == NULL)
2887 return (ENOBUFS);
2888
2889 MCLGET(m, M_DONTWAIT);
2890 if ((m->m_flags & M_EXT) == 0) {
2891 m_freem(m);
2892 return (ENOBUFS);
2893 }
2894
2895 if (rxs->rxs_mbuf != NULL)
2896 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2897
2898 rxs->rxs_mbuf = m;
2899
2900 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2901 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2902 BUS_DMA_READ|BUS_DMA_NOWAIT);
2903 if (error) {
2904 printf("%s: unable to load rx DMA map %d, error = %d\n",
2905 sc->sc_dev.dv_xname, idx, error);
2906 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2907 }
2908
2909 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2910 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2911
2912 WM_INIT_RXDESC(sc, idx);
2913
2914 return (0);
2915 }
2916
2917 /*
2918 * wm_set_ral:
2919 *
2920 	 * Set an entry in the receive address list.
2921 */
2922 static void
2923 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2924 {
2925 uint32_t ral_lo, ral_hi;
2926
2927 if (enaddr != NULL) {
2928 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2929 (enaddr[3] << 24);
2930 ral_hi = enaddr[4] | (enaddr[5] << 8);
2931 ral_hi |= RAL_AV;
2932 } else {
2933 ral_lo = 0;
2934 ral_hi = 0;
2935 }
2936
2937 if (sc->sc_type >= WM_T_82544) {
2938 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2939 ral_lo);
2940 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2941 ral_hi);
2942 } else {
2943 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2944 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2945 }
2946 }
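
/*
 * For example (illustrative only): the station address 00:a0:c9:12:34:56
 * packs as ral_lo = 0x12c9a000 and ral_hi = 0x5634 | RAL_AV -- the six
 * address bytes in order from the least significant byte of RAL_LO
 * through the low half of RAL_HI, with the Address Valid bit set.
 */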
2947
2948 /*
2949 * wm_mchash:
2950 *
2951 * Compute the hash of the multicast address for the 4096-bit
2952 * multicast filter.
2953 */
2954 static uint32_t
2955 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2956 {
2957 static const int lo_shift[4] = { 4, 3, 2, 0 };
2958 static const int hi_shift[4] = { 4, 5, 6, 8 };
2959 uint32_t hash;
2960
2961 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2962 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2963
2964 return (hash & 0xfff);
2965 }
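
/*
 * Worked example (illustrative only): with the default filter type 0
 * (lo_shift 4, hi_shift 4), the multicast address 01:00:5e:00:00:01
 * hashes to (0x00 >> 4) | (0x01 << 4) = 0x010.  wm_set_filter() below
 * then uses bits 11..5 of the hash (0) as the MTA register index and
 * bits 4..0 (16) as the bit position within that register.
 */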
2966
2967 /*
2968 * wm_set_filter:
2969 *
2970 * Set up the receive filter.
2971 */
2972 static void
2973 wm_set_filter(struct wm_softc *sc)
2974 {
2975 struct ethercom *ec = &sc->sc_ethercom;
2976 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2977 struct ether_multi *enm;
2978 struct ether_multistep step;
2979 bus_addr_t mta_reg;
2980 uint32_t hash, reg, bit;
2981 int i;
2982
2983 if (sc->sc_type >= WM_T_82544)
2984 mta_reg = WMREG_CORDOVA_MTA;
2985 else
2986 mta_reg = WMREG_MTA;
2987
2988 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2989
2990 if (ifp->if_flags & IFF_BROADCAST)
2991 sc->sc_rctl |= RCTL_BAM;
2992 if (ifp->if_flags & IFF_PROMISC) {
2993 sc->sc_rctl |= RCTL_UPE;
2994 goto allmulti;
2995 }
2996
2997 /*
2998 * Set the station address in the first RAL slot, and
2999 * clear the remaining slots.
3000 */
3001 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
3002 for (i = 1; i < WM_RAL_TABSIZE; i++)
3003 wm_set_ral(sc, NULL, i);
3004
3005 /* Clear out the multicast table. */
3006 for (i = 0; i < WM_MC_TABSIZE; i++)
3007 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3008
3009 ETHER_FIRST_MULTI(step, ec, enm);
3010 while (enm != NULL) {
3011 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3012 /*
3013 * We must listen to a range of multicast addresses.
3014 * For now, just accept all multicasts, rather than
3015 * trying to set only those filter bits needed to match
3016 * the range. (At this time, the only use of address
3017 * ranges is for IP multicast routing, for which the
3018 * range is big enough to require all bits set.)
3019 */
3020 goto allmulti;
3021 }
3022
3023 hash = wm_mchash(sc, enm->enm_addrlo);
3024
3025 reg = (hash >> 5) & 0x7f;
3026 bit = hash & 0x1f;
3027
3028 hash = CSR_READ(sc, mta_reg + (reg << 2));
3029 hash |= 1U << bit;
3030
3031 		/*
		 * XXX Hardware bug?  On the i82544, a write to an odd MTA
		 * offset can apparently disturb the neighboring (even)
		 * register, so save it and write it back afterwards.
		 */
3032 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3033 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3034 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3035 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3036 } else
3037 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3038
3039 ETHER_NEXT_MULTI(step, enm);
3040 }
3041
3042 ifp->if_flags &= ~IFF_ALLMULTI;
3043 goto setit;
3044
3045 allmulti:
3046 ifp->if_flags |= IFF_ALLMULTI;
3047 sc->sc_rctl |= RCTL_MPE;
3048
3049 setit:
3050 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3051 }
3052
3053 /*
3054 * wm_tbi_mediainit:
3055 *
3056 * Initialize media for use on 1000BASE-X devices.
3057 */
3058 static void
3059 wm_tbi_mediainit(struct wm_softc *sc)
3060 {
3061 const char *sep = "";
3062
3063 if (sc->sc_type < WM_T_82543)
3064 sc->sc_tipg = TIPG_WM_DFLT;
3065 else
3066 sc->sc_tipg = TIPG_LG_DFLT;
3067
3068 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3069 wm_tbi_mediastatus);
3070
3071 /*
3072 * SWD Pins:
3073 *
3074 * 0 = Link LED (output)
3075 * 1 = Loss Of Signal (input)
3076 */
3077 sc->sc_ctrl |= CTRL_SWDPIO(0);
3078 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3079
3080 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3081
3082 #define ADD(ss, mm, dd) \
3083 do { \
3084 printf("%s%s", sep, ss); \
3085 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3086 sep = ", "; \
3087 } while (/*CONSTCOND*/0)
3088
3089 printf("%s: ", sc->sc_dev.dv_xname);
3090 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3091 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3092 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3093 printf("\n");
3094
3095 #undef ADD
3096
3097 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3098 }
3099
3100 /*
3101 * wm_tbi_mediastatus: [ifmedia interface function]
3102 *
3103 * Get the current interface media status on a 1000BASE-X device.
3104 */
3105 static void
3106 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3107 {
3108 struct wm_softc *sc = ifp->if_softc;
3109 uint32_t ctrl;
3110
3111 ifmr->ifm_status = IFM_AVALID;
3112 ifmr->ifm_active = IFM_ETHER;
3113
3114 if (sc->sc_tbi_linkup == 0) {
3115 ifmr->ifm_active |= IFM_NONE;
3116 return;
3117 }
3118
3119 ifmr->ifm_status |= IFM_ACTIVE;
3120 ifmr->ifm_active |= IFM_1000_SX;
3121 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3122 ifmr->ifm_active |= IFM_FDX;
3123 ctrl = CSR_READ(sc, WMREG_CTRL);
3124 if (ctrl & CTRL_RFCE)
3125 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3126 if (ctrl & CTRL_TFCE)
3127 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3128 }
3129
3130 /*
3131 * wm_tbi_mediachange: [ifmedia interface function]
3132 *
3133 * Set hardware to newly-selected media on a 1000BASE-X device.
3134 */
3135 static int
3136 wm_tbi_mediachange(struct ifnet *ifp)
3137 {
3138 struct wm_softc *sc = ifp->if_softc;
3139 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3140 uint32_t status;
3141 int i;
3142
3143 sc->sc_txcw = ife->ifm_data;
3144 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3145 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3146 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3147 sc->sc_txcw |= TXCW_ANE;
3148
3149 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3150 delay(10000);
3151
3152 /* NOTE: CTRL will update TFCE and RFCE automatically. */
3153
3154 sc->sc_tbi_anstate = 0;
3155
3156 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3157 /* Have signal; wait for the link to come up. */
3158 for (i = 0; i < 50; i++) {
3159 delay(10000);
3160 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3161 break;
3162 }
3163
3164 status = CSR_READ(sc, WMREG_STATUS);
3165 if (status & STATUS_LU) {
3166 /* Link is up. */
3167 DPRINTF(WM_DEBUG_LINK,
3168 ("%s: LINK: set media -> link up %s\n",
3169 sc->sc_dev.dv_xname,
3170 (status & STATUS_FD) ? "FDX" : "HDX"));
3171 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3172 sc->sc_fcrtl &= ~FCRTL_XONE;
3173 if (status & STATUS_FD)
3174 sc->sc_tctl |=
3175 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3176 else
3177 sc->sc_tctl |=
3178 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3179 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
3180 sc->sc_fcrtl |= FCRTL_XONE;
3181 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3182 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3183 WMREG_OLD_FCRTL : WMREG_FCRTL,
3184 sc->sc_fcrtl);
3185 sc->sc_tbi_linkup = 1;
3186 } else {
3187 /* Link is down. */
3188 DPRINTF(WM_DEBUG_LINK,
3189 ("%s: LINK: set media -> link down\n",
3190 sc->sc_dev.dv_xname));
3191 sc->sc_tbi_linkup = 0;
3192 }
3193 } else {
3194 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3195 sc->sc_dev.dv_xname));
3196 sc->sc_tbi_linkup = 0;
3197 }
3198
3199 wm_tbi_set_linkled(sc);
3200
3201 return (0);
3202 }
3203
3204 /*
3205 * wm_tbi_set_linkled:
3206 *
3207 * Update the link LED on 1000BASE-X devices.
3208 */
3209 static void
3210 wm_tbi_set_linkled(struct wm_softc *sc)
3211 {
3212
3213 if (sc->sc_tbi_linkup)
3214 sc->sc_ctrl |= CTRL_SWDPIN(0);
3215 else
3216 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3217
3218 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3219 }
3220
3221 /*
3222 * wm_tbi_check_link:
3223 *
3224 * Check the link on 1000BASE-X devices.
3225 */
3226 static void
3227 wm_tbi_check_link(struct wm_softc *sc)
3228 {
3229 uint32_t rxcw, ctrl, status;
3230
3231 if (sc->sc_tbi_anstate == 0)
3232 return;
3233 else if (sc->sc_tbi_anstate > 1) {
3234 DPRINTF(WM_DEBUG_LINK,
3235 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3236 sc->sc_tbi_anstate));
3237 sc->sc_tbi_anstate--;
3238 return;
3239 }
3240
3241 sc->sc_tbi_anstate = 0;
3242
3243 rxcw = CSR_READ(sc, WMREG_RXCW);
3244 ctrl = CSR_READ(sc, WMREG_CTRL);
3245 status = CSR_READ(sc, WMREG_STATUS);
3246
3247 if ((status & STATUS_LU) == 0) {
3248 DPRINTF(WM_DEBUG_LINK,
3249 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3250 sc->sc_tbi_linkup = 0;
3251 } else {
3252 DPRINTF(WM_DEBUG_LINK,
3253 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3254 (status & STATUS_FD) ? "FDX" : "HDX"));
3255 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3256 sc->sc_fcrtl &= ~FCRTL_XONE;
3257 if (status & STATUS_FD)
3258 sc->sc_tctl |=
3259 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3260 else
3261 sc->sc_tctl |=
3262 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3263 if (ctrl & CTRL_TFCE)
3264 sc->sc_fcrtl |= FCRTL_XONE;
3265 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3266 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3267 WMREG_OLD_FCRTL : WMREG_FCRTL,
3268 sc->sc_fcrtl);
3269 sc->sc_tbi_linkup = 1;
3270 }
3271
3272 wm_tbi_set_linkled(sc);
3273 }
3274
3275 /*
3276 * wm_gmii_reset:
3277 *
3278 * Reset the PHY.
3279 */
3280 static void
3281 wm_gmii_reset(struct wm_softc *sc)
3282 {
3283 uint32_t reg;
3284
3285 if (sc->sc_type >= WM_T_82544) {
3286 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3287 delay(20000);
3288
3289 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3290 delay(20000);
3291 } else {
3292 /* The PHY reset pin is active-low. */
3293 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3294 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3295 CTRL_EXT_SWDPIN(4));
3296 reg |= CTRL_EXT_SWDPIO(4);
3297
3298 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3299 delay(10);
3300
3301 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3302 delay(10);
3303
3304 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3305 delay(10);
3306 #if 0
3307 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3308 #endif
3309 }
3310 }
3311
3312 /*
3313 * wm_gmii_mediainit:
3314 *
3315 * Initialize media for use on 1000BASE-T devices.
3316 */
3317 static void
3318 wm_gmii_mediainit(struct wm_softc *sc)
3319 {
3320 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3321
3322 /* We have MII. */
3323 sc->sc_flags |= WM_F_HAS_MII;
3324
3325 sc->sc_tipg = TIPG_1000T_DFLT;
3326
3327 /*
3328 * Let the chip set speed/duplex on its own based on
3329 * signals from the PHY.
3330 */
3331 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3332 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3333
3334 /* Initialize our media structures and probe the GMII. */
3335 sc->sc_mii.mii_ifp = ifp;
3336
3337 if (sc->sc_type >= WM_T_82544) {
3338 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3339 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3340 } else {
3341 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3342 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3343 }
3344 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3345
3346 wm_gmii_reset(sc);
3347
3348 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3349 wm_gmii_mediastatus);
3350
3351 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3352 MII_OFFSET_ANY, MIIF_DOPAUSE);
3353 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3354 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3355 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3356 } else
3357 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3358 }
3359
3360 /*
3361 * wm_gmii_mediastatus: [ifmedia interface function]
3362 *
3363 * Get the current interface media status on a 1000BASE-T device.
3364 */
3365 static void
3366 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3367 {
3368 struct wm_softc *sc = ifp->if_softc;
3369
3370 mii_pollstat(&sc->sc_mii);
3371 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3372 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3373 sc->sc_flowflags;
3374 }
3375
3376 /*
3377 * wm_gmii_mediachange: [ifmedia interface function]
3378 *
3379 * Set hardware to newly-selected media on a 1000BASE-T device.
3380 */
3381 static int
3382 wm_gmii_mediachange(struct ifnet *ifp)
3383 {
3384 struct wm_softc *sc = ifp->if_softc;
3385
3386 if (ifp->if_flags & IFF_UP)
3387 mii_mediachg(&sc->sc_mii);
3388 return (0);
3389 }
3390
3391 #define MDI_IO CTRL_SWDPIN(2)
3392 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3393 #define MDI_CLK CTRL_SWDPIN(3)
3394
3395 static void
3396 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3397 {
3398 uint32_t i, v;
3399
3400 v = CSR_READ(sc, WMREG_CTRL);
3401 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3402 v |= MDI_DIR | CTRL_SWDPIO(3);
3403
3404 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3405 if (data & i)
3406 v |= MDI_IO;
3407 else
3408 v &= ~MDI_IO;
3409 CSR_WRITE(sc, WMREG_CTRL, v);
3410 delay(10);
3411 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3412 delay(10);
3413 CSR_WRITE(sc, WMREG_CTRL, v);
3414 delay(10);
3415 }
3416 }
3417
3418 static uint32_t
3419 i82543_mii_recvbits(struct wm_softc *sc)
3420 {
3421 uint32_t v, i, data = 0;
3422
3423 v = CSR_READ(sc, WMREG_CTRL);
3424 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3425 v |= CTRL_SWDPIO(3);
3426
3427 CSR_WRITE(sc, WMREG_CTRL, v);
3428 delay(10);
3429 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3430 delay(10);
3431 CSR_WRITE(sc, WMREG_CTRL, v);
3432 delay(10);
3433
3434 for (i = 0; i < 16; i++) {
3435 data <<= 1;
3436 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3437 delay(10);
3438 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3439 data |= 1;
3440 CSR_WRITE(sc, WMREG_CTRL, v);
3441 delay(10);
3442 }
3443
3444 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3445 delay(10);
3446 CSR_WRITE(sc, WMREG_CTRL, v);
3447 delay(10);
3448
3449 return (data);
3450 }
3451
3452 #undef MDI_IO
3453 #undef MDI_DIR
3454 #undef MDI_CLK
3455
3456 /*
3457 * wm_gmii_i82543_readreg: [mii interface function]
3458 *
3459 * Read a PHY register on the GMII (i82543 version).
3460 */
3461 static int
3462 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3463 {
3464 struct wm_softc *sc = (void *) self;
3465 int rv;
3466
3467 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3468 i82543_mii_sendbits(sc, reg | (phy << 5) |
3469 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3470 rv = i82543_mii_recvbits(sc) & 0xffff;
3471
3472 DPRINTF(WM_DEBUG_GMII,
3473 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3474 sc->sc_dev.dv_xname, phy, reg, rv));
3475
3476 return (rv);
3477 }
3478
3479 /*
3480 * wm_gmii_i82543_writereg: [mii interface function]
3481 *
3482 * Write a PHY register on the GMII (i82543 version).
3483 */
3484 static void
3485 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3486 {
3487 struct wm_softc *sc = (void *) self;
3488
3489 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3490 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3491 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3492 (MII_COMMAND_START << 30), 32);
3493 }
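
/*
 * A note on the frame layout used by the two functions above: after the
 * 32-bit preamble of ones, the bits form a standard IEEE 802.3 clause 22
 * management frame, sent MSB first -- 2 start bits, a 2-bit opcode, a
 * 5-bit PHY address and a 5-bit register address, followed for writes by
 * a 2-bit turnaround and 16 data bits.  The shifts in the expressions
 * above simply place each field at its position within that frame; for
 * reads, the turnaround and data bits are instead clocked in by
 * i82543_mii_recvbits().
 */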
3494
3495 /*
3496 * wm_gmii_i82544_readreg: [mii interface function]
3497 *
3498 * Read a PHY register on the GMII.
3499 */
3500 static int
3501 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3502 {
3503 struct wm_softc *sc = (void *) self;
3504 uint32_t mdic = 0;
3505 int i, rv;
3506
3507 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3508 MDIC_REGADD(reg));
3509
3510 for (i = 0; i < 100; i++) {
3511 mdic = CSR_READ(sc, WMREG_MDIC);
3512 if (mdic & MDIC_READY)
3513 break;
3514 delay(10);
3515 }
3516
3517 if ((mdic & MDIC_READY) == 0) {
3518 printf("%s: MDIC read timed out: phy %d reg %d\n",
3519 sc->sc_dev.dv_xname, phy, reg);
3520 rv = 0;
3521 } else if (mdic & MDIC_E) {
3522 #if 0 /* This is normal if no PHY is present. */
3523 printf("%s: MDIC read error: phy %d reg %d\n",
3524 sc->sc_dev.dv_xname, phy, reg);
3525 #endif
3526 rv = 0;
3527 } else {
3528 rv = MDIC_DATA(mdic);
3529 if (rv == 0xffff)
3530 rv = 0;
3531 }
3532
3533 return (rv);
3534 }
3535
3536 /*
3537 * wm_gmii_i82544_writereg: [mii interface function]
3538 *
3539 * Write a PHY register on the GMII.
3540 */
3541 static void
3542 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3543 {
3544 struct wm_softc *sc = (void *) self;
3545 uint32_t mdic = 0;
3546 int i;
3547
3548 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3549 MDIC_REGADD(reg) | MDIC_DATA(val));
3550
3551 for (i = 0; i < 100; i++) {
3552 mdic = CSR_READ(sc, WMREG_MDIC);
3553 if (mdic & MDIC_READY)
3554 break;
3555 delay(10);
3556 }
3557
3558 if ((mdic & MDIC_READY) == 0)
3559 printf("%s: MDIC write timed out: phy %d reg %d\n",
3560 sc->sc_dev.dv_xname, phy, reg);
3561 else if (mdic & MDIC_E)
3562 printf("%s: MDIC write error: phy %d reg %d\n",
3563 sc->sc_dev.dv_xname, phy, reg);
3564 }
3565
3566 /*
3567 * wm_gmii_statchg: [mii interface function]
3568 *
3569 * Callback from MII layer when media changes.
3570 */
3571 static void
3572 wm_gmii_statchg(struct device *self)
3573 {
3574 struct wm_softc *sc = (void *) self;
3575 struct mii_data *mii = &sc->sc_mii;
3576
3577 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
3578 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3579 sc->sc_fcrtl &= ~FCRTL_XONE;
3580
3581 /*
3582 * Get flow control negotiation result.
3583 */
3584 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3585 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3586 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3587 mii->mii_media_active &= ~IFM_ETH_FMASK;
3588 }
3589
3590 if (sc->sc_flowflags & IFM_FLOW) {
3591 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
3592 sc->sc_ctrl |= CTRL_TFCE;
3593 sc->sc_fcrtl |= FCRTL_XONE;
3594 }
3595 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3596 sc->sc_ctrl |= CTRL_RFCE;
3597 }
3598
3599 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3600 DPRINTF(WM_DEBUG_LINK,
3601 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3602 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3603 } else {
3604 DPRINTF(WM_DEBUG_LINK,
3605 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3606 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3607 }
3608
3609 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3610 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3611 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
3612 : WMREG_FCRTL, sc->sc_fcrtl);
3613 }
3614