/*	$NetBSD: if_wm.c,v 1.68 2004/02/19 05:19:52 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.68 2004/02/19 05:19:52 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 40 DMA segments per
 * packet (there have been reports of jumbo frame packets with as
 * many as 30 DMA segments!).
 */
#define	WM_NTXSEGS		40
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
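
/*
 * Since WM_NTXDESC and WM_TXQUEUELEN are both powers of two, advancing
 * a ring index is a mask operation rather than a modulus; for example,
 * WM_NEXTTX(255) == 0 and WM_NEXTTXS(63) == 0.
 */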

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
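
/*
 * E.g. WM_CDTXOFF(5) yields the byte offset of Tx descriptor 5 within
 * the control data block; these offsets feed both the partial
 * bus_dmamap_sync() calls and the WM_CDTXADDR()/WM_CDRXADDR() DMA
 * address calculations below.
 */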

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;		/* Rx buffer alignment "scoot"; see WM_INIT_RXDESC */

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding rest of current packet */
	int sc_rxlen;			/* bytes accumulated so far, pre-EOP */
	struct mbuf *sc_rxhead;		/* head of Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* -> sc_rxtail->m_next */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

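/*
 * The Rx mbuf chain is managed with a tail pointer: sc_rxtailp always
 * points at the m_next field of the last mbuf in the chain (or at
 * sc_rxhead when the chain is empty), so WM_RXCHAIN_LINK() appends a
 * buffer in constant time.
 */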
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
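
/*
 * Example: with WM_NTXDESC == 256, WM_CDTXSYNC(sc, 250, 10, ops) first
 * syncs descriptors 250..255 and then wraps to sync 0..3, so a
 * ring-crossing range costs two bus_dmamap_sync() calls.
 */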

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't "scoot" packets longer than the	\
	 * standard Ethernet MTU.  On strict-alignment platforms, if	\
	 * the total size exceeds (2K - 2) we set align_tweak to 0	\
	 * and let the upper layer copy the headers.			\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
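
/*
 * Note that WM_INIT_RXDESC() finishes by writing the descriptor index
 * to the Rx Descriptor Tail register (sc_rdt_reg), handing the freshly
 * initialized descriptor back to the chip.
 */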

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 40
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
	"txseg17",
	"txseg18",
	"txseg19",
	"txseg20",
	"txseg21",
	"txseg22",
	"txseg23",
	"txseg24",
	"txseg25",
	"txseg26",
	"txseg27",
	"txseg28",
	"txseg29",
	"txseg30",
	"txseg31",
	"txseg32",
	"txseg33",
	"txseg34",
	"txseg35",
	"txseg36",
	"txseg37",
	"txseg38",
	"txseg39",
	"txseg40",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
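/*
 * wm_io_read:
 *
 *	Read a chip register via the I/O mapped indirect access window:
 *	the register offset is written at I/O BAR offset 0, then the
 *	value is read from offset 4.
 */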
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

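/*
 * wm_io_write:
 *
 *	Write a chip register via the I/O mapped indirect access window:
 *	the register offset is written at I/O BAR offset 0, then the
 *	value at offset 4.
 */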
static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

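/*
 * wm_lookup:
 *
 *	Look up the PCI vendor and product IDs in our product table;
 *	returns the matching entry, or NULL if we don't support the
 *	device.
 */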
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

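/*
 * wm_match:
 *
 *	Autoconfiguration match routine; succeeds if the device is in
 *	our product table.
 */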
static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

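/*
 * wm_attach:
 *
 *	Autoconfiguration attach routine: map the registers, establish
 *	the interrupt, allocate the DMA control data and buffer maps,
 *	read the MAC address and configuration from the EEPROM, and
 *	attach the network interface.
 */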
static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX,
				&sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	     sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	     0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	     sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	     0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     sizeof(struct wm_control_data), 1,
	     sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	     0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		     WM_NTXSEGS, MCLBYTES, 0, 0,
		     &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= WTX_CMD_IDE;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
			    cksumfields;
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    le32toh(dmamap->dm_segs[seg].ds_addr),
			    le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));
1997
1998 /*
1999 * If an error occurred, update stats and drop the packet.
2000 */
2001 if (errors &
2002 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2003 ifp->if_ierrors++;
2004 if (errors & WRX_ER_SE)
2005 printf("%s: symbol error\n",
2006 sc->sc_dev.dv_xname);
2007 else if (errors & WRX_ER_SEQ)
2008 printf("%s: receive sequence error\n",
2009 sc->sc_dev.dv_xname);
2010 else if (errors & WRX_ER_CE)
2011 printf("%s: CRC error\n",
2012 sc->sc_dev.dv_xname);
2013 m_freem(m);
2014 continue;
2015 }
2016
2017 /*
2018 * No errors. Receive the packet.
2019 *
2020 		 * Note: we have configured the chip to include the
2021 * CRC with every packet.
2022 */
2023 m->m_flags |= M_HASFCS;
2024 m->m_pkthdr.rcvif = ifp;
2025 m->m_pkthdr.len = len;
2026
2027 #if 0 /* XXXJRT */
2028 /*
2029 * If VLANs are enabled, VLAN packets have been unwrapped
2030 * for us. Associate the tag with the packet.
2031 */
2032 if (sc->sc_ethercom.ec_nvlans != 0 &&
2033 (status & WRX_ST_VP) != 0) {
2034 struct m_tag *vtag;
2035
2036 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2037 M_NOWAIT);
2038 if (vtag == NULL) {
2039 ifp->if_ierrors++;
2040 printf("%s: unable to allocate VLAN tag\n",
2041 sc->sc_dev.dv_xname);
2042 m_freem(m);
2043 continue;
2044 }
2045
2046 *(u_int *)(vtag + 1) =
2047 le16toh(sc->sc_rxdescs[i].wrx_special);
2048 }
2049 #endif /* XXXJRT */
2050
2051 /*
2052 * Set up checksum info for this packet.
2053 */
2054 if (status & WRX_ST_IPCS) {
2055 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2056 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2057 if (errors & WRX_ER_IPE)
2058 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2059 }
2060 if (status & WRX_ST_TCPCS) {
2061 /*
2062 * Note: we don't know if this was TCP or UDP,
2063 * so we just set both bits, and expect the
2064 * upper layers to deal.
2065 */
2066 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2067 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2068 if (errors & WRX_ER_TCPE)
2069 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2070 }
2071
2072 ifp->if_ipackets++;
2073
2074 #if NBPFILTER > 0
2075 /* Pass this up to any BPF listeners. */
2076 if (ifp->if_bpf)
2077 bpf_mtap(ifp->if_bpf, m);
2078 #endif /* NBPFILTER > 0 */
2079
2080 /* Pass it on. */
2081 (*ifp->if_input)(ifp, m);
2082 }
2083
2084 /* Update the receive pointer. */
2085 sc->sc_rxptr = i;
2086
2087 DPRINTF(WM_DEBUG_RX,
2088 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2089 }
2090
2091 /*
2092 * wm_linkintr:
2093 *
2094 * Helper; handle link interrupts.
2095 */
2096 static void
2097 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2098 {
2099 uint32_t status;
2100
2101 /*
2102 * If we get a link status interrupt on a 1000BASE-T
2103 * device, just fall into the normal MII tick path.
2104 */
2105 if (sc->sc_flags & WM_F_HAS_MII) {
2106 if (icr & ICR_LSC) {
2107 DPRINTF(WM_DEBUG_LINK,
2108 ("%s: LINK: LSC -> mii_tick\n",
2109 sc->sc_dev.dv_xname));
2110 mii_tick(&sc->sc_mii);
2111 } else if (icr & ICR_RXSEQ) {
2112 DPRINTF(WM_DEBUG_LINK,
2113 			    ("%s: LINK: Receive sequence error\n",
2114 sc->sc_dev.dv_xname));
2115 }
2116 return;
2117 }
2118
2119 /*
2120 * If we are now receiving /C/, check for link again in
2121 * a couple of link clock ticks.
2122 */
2123 if (icr & ICR_RXCFG) {
2124 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2125 sc->sc_dev.dv_xname));
2126 sc->sc_tbi_anstate = 2;
2127 }
2128
2129 if (icr & ICR_LSC) {
2130 status = CSR_READ(sc, WMREG_STATUS);
2131 if (status & STATUS_LU) {
2132 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2133 sc->sc_dev.dv_xname,
2134 (status & STATUS_FD) ? "FDX" : "HDX"));
2135 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2136 if (status & STATUS_FD)
2137 sc->sc_tctl |=
2138 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2139 else
2140 sc->sc_tctl |=
2141 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2142 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2143 sc->sc_tbi_linkup = 1;
2144 } else {
2145 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2146 sc->sc_dev.dv_xname));
2147 sc->sc_tbi_linkup = 0;
2148 }
2149 sc->sc_tbi_anstate = 2;
2150 wm_tbi_set_linkled(sc);
2151 } else if (icr & ICR_RXSEQ) {
2152 DPRINTF(WM_DEBUG_LINK,
2153 ("%s: LINK: Receive sequence error\n",
2154 sc->sc_dev.dv_xname));
2155 }
2156 }
2157
2158 /*
2159 * wm_tick:
2160 *
2161 * One second timer, used to check link status, sweep up
2162 * completed transmit jobs, etc.
2163 */
2164 static void
2165 wm_tick(void *arg)
2166 {
2167 struct wm_softc *sc = arg;
2168 int s;
2169
2170 s = splnet();
2171
2172 if (sc->sc_flags & WM_F_HAS_MII)
2173 mii_tick(&sc->sc_mii);
2174 else
2175 wm_tbi_check_link(sc);
2176
2177 splx(s);
2178
2179 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2180 }
2181
2182 /*
2183 * wm_reset:
2184 *
2185  * Reset the i8254x chip.
2186 */
2187 static void
2188 wm_reset(struct wm_softc *sc)
2189 {
2190 int i;
2191
2192 switch (sc->sc_type) {
2193 case WM_T_82544:
2194 case WM_T_82540:
2195 case WM_T_82545:
2196 case WM_T_82546:
2197 case WM_T_82541:
2198 case WM_T_82541_2:
2199 /*
2200 * These chips have a problem with the memory-mapped
2201 * write cycle when issuing the reset, so use I/O-mapped
2202 * access, if possible.
2203 */
2204 if (sc->sc_flags & WM_F_IOH_VALID)
2205 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2206 else
2207 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2208 break;
2209
2210 case WM_T_82545_3:
2211 case WM_T_82546_3:
2212 /* Use the shadow control register on these chips. */
2213 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2214 break;
2215
2216 default:
2217 /* Everything else can safely use the documented method. */
2218 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2219 break;
2220 }
2221 delay(10000);
2222
2223 for (i = 0; i < 1000; i++) {
2224 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2225 return;
2226 delay(20);
2227 }
2228
2229 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2230 printf("%s: WARNING: reset failed to complete\n",
2231 sc->sc_dev.dv_xname);
2232 }
2233
2234 /*
2235 * wm_init: [ifnet interface function]
2236 *
2237 * Initialize the interface. Must be called at splnet().
2238 */
2239 static int
2240 wm_init(struct ifnet *ifp)
2241 {
2242 struct wm_softc *sc = ifp->if_softc;
2243 struct wm_rxsoft *rxs;
2244 int i, error = 0;
2245 uint32_t reg;
2246
2247 /*
2248 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2249 	 * There is a small but measurable benefit to avoiding the adjustment
2250 	 * of the descriptor so that the headers are aligned, for a normal MTU,
2251 * on such platforms. One possibility is that the DMA itself is
2252 * slightly more efficient if the front of the entire packet (instead
2253 * of the front of the headers) is aligned.
2254 *
2255 * Note we must always set align_tweak to 0 if we are using
2256 * jumbo frames.
2257 */
2258 #ifdef __NO_STRICT_ALIGNMENT
2259 sc->sc_align_tweak = 0;
2260 #else
2261 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2262 sc->sc_align_tweak = 0;
2263 else
2264 sc->sc_align_tweak = 2;
2265 #endif /* __NO_STRICT_ALIGNMENT */
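	/*
	 * E.g. with a 2-byte tweak the 14-byte Ethernet header ends at
	 * offset 2 + 14 = 16, leaving the IP header 4-byte aligned in
	 * the receive buffer.
	 */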
2266
2267 /* Cancel any pending I/O. */
2268 wm_stop(ifp, 0);
2269
2270 /* Reset the chip to a known state. */
2271 wm_reset(sc);
2272
2273 /* Initialize the transmit descriptor ring. */
2274 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2275 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2276 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2277 sc->sc_txfree = WM_NTXDESC;
2278 sc->sc_txnext = 0;
2279
2280 sc->sc_txctx_ipcs = 0xffffffff;
2281 sc->sc_txctx_tucs = 0xffffffff;
2282
2283 if (sc->sc_type < WM_T_82543) {
2284 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2285 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2286 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2287 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2288 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2289 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2290 } else {
2291 CSR_WRITE(sc, WMREG_TBDAH, 0);
2292 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2293 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2294 CSR_WRITE(sc, WMREG_TDH, 0);
2295 CSR_WRITE(sc, WMREG_TDT, 0);
2296 CSR_WRITE(sc, WMREG_TIDV, 128);
2297
2298 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2299 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2300 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2301 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2302 }
2303 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2304 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2305
2306 /* Initialize the transmit job descriptors. */
2307 for (i = 0; i < WM_TXQUEUELEN; i++)
2308 sc->sc_txsoft[i].txs_mbuf = NULL;
2309 sc->sc_txsfree = WM_TXQUEUELEN;
2310 sc->sc_txsnext = 0;
2311 sc->sc_txsdirty = 0;
2312
2313 /*
2314 * Initialize the receive descriptor and receive job
2315 * descriptor rings.
2316 */
2317 if (sc->sc_type < WM_T_82543) {
2318 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2319 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2320 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2321 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2322 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2323 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2324
2325 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2326 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2327 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2328 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2329 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2330 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2331 } else {
2332 CSR_WRITE(sc, WMREG_RDBAH, 0);
2333 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2334 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2335 CSR_WRITE(sc, WMREG_RDH, 0);
2336 CSR_WRITE(sc, WMREG_RDT, 0);
2337 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2338 }
2339 for (i = 0; i < WM_NRXDESC; i++) {
2340 rxs = &sc->sc_rxsoft[i];
2341 if (rxs->rxs_mbuf == NULL) {
2342 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2343 printf("%s: unable to allocate or map rx "
2344 "buffer %d, error = %d\n",
2345 sc->sc_dev.dv_xname, i, error);
2346 /*
2347 * XXX Should attempt to run with fewer receive
2348 * XXX buffers instead of just failing.
2349 */
2350 wm_rxdrain(sc);
2351 goto out;
2352 }
2353 } else
2354 WM_INIT_RXDESC(sc, i);
2355 }
2356 sc->sc_rxptr = 0;
2357 sc->sc_rxdiscard = 0;
2358 WM_RXCHAIN_RESET(sc);
2359
2360 /*
2361 * Clear out the VLAN table -- we don't use it (yet).
2362 */
2363 CSR_WRITE(sc, WMREG_VET, 0);
2364 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2365 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2366
2367 /*
2368 * Set up flow-control parameters.
2369 *
2370 * XXX Values could probably stand some tuning.
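	 *
	 * (FCAL/FCAH hold the 802.3x PAUSE frame destination address,
	 * 01:80:c2:00:00:01, and FCT its EtherType, 0x8808.)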
2371 */
2372 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2373 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2374 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2375 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2376
2377 if (sc->sc_type < WM_T_82543) {
2378 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2379 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2380 } else {
2381 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2382 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2383 }
2384 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2385 }
2386
2387 #if 0 /* XXXJRT */
2388 /* Deal with VLAN enables. */
2389 if (sc->sc_ethercom.ec_nvlans != 0)
2390 sc->sc_ctrl |= CTRL_VME;
2391 else
2392 #endif /* XXXJRT */
2393 sc->sc_ctrl &= ~CTRL_VME;
2394
2395 /* Write the control registers. */
2396 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2397 #if 0
2398 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2399 #endif
2400
2401 /*
2402 * Set up checksum offload parameters.
2403 */
2404 reg = CSR_READ(sc, WMREG_RXCSUM);
2405 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2406 reg |= RXCSUM_IPOFL;
2407 else
2408 reg &= ~RXCSUM_IPOFL;
2409 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2410 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2411 else {
2412 reg &= ~RXCSUM_TUOFL;
2413 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2414 reg &= ~RXCSUM_IPOFL;
2415 }
2416 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2417
2418 /*
2419 * Set up the interrupt registers.
2420 */
2421 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2422 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2423 ICR_RXO | ICR_RXT0;
2424 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2425 sc->sc_icr |= ICR_RXCFG;
2426 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2427
2428 /* Set up the inter-packet gap. */
2429 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2430
2431 #if 0 /* XXXJRT */
2432 /* Set the VLAN ethernetype. */
2433 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2434 #endif
2435
2436 /*
2437 * Set up the transmit control register; we start out with
2438 	 * a collision distance suitable for FDX, but update it when
2439 * we resolve the media type.
2440 */
2441 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2442 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2443 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2444
2445 /* Set the media. */
2446 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2447
2448 /*
2449 * Set up the receive control register; we actually program
2450 * the register when we set the receive filter. Use multicast
2451 * address offset type 0.
2452 *
2453 * Only the i82544 has the ability to strip the incoming
2454 * CRC, so we don't enable that feature.
2455 */
2456 sc->sc_mchash_type = 0;
2457 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2458 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2459
2460 	if (MCLBYTES == 2048) {
2461 sc->sc_rctl |= RCTL_2k;
2462 } else {
2463 /*
2464 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2465 * XXX segments, dropping" -- why?
2466 */
2467 #if 0
2468 		if (sc->sc_type >= WM_T_82543) {
2469 			switch (MCLBYTES) {
2470 case 4096:
2471 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2472 break;
2473 case 8192:
2474 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2475 break;
2476 case 16384:
2477 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2478 break;
2479 default:
2480 panic("wm_init: MCLBYTES %d unsupported",
2481 MCLBYTES);
2482 break;
2483 }
2484 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
2485 #else
2486 panic("wm_init: MCLBYTES > 2048 not supported.");
2487 #endif
2488 }
2489
2490 /* Set the receive filter. */
2491 wm_set_filter(sc);
2492
2493 /* Start the one second link check clock. */
2494 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2495
2496 /* ...all done! */
2497 ifp->if_flags |= IFF_RUNNING;
2498 ifp->if_flags &= ~IFF_OACTIVE;
2499
2500 out:
2501 if (error)
2502 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2503 return (error);
2504 }
2505
2506 /*
2507 * wm_rxdrain:
2508 *
2509 * Drain the receive queue.
2510 */
2511 static void
2512 wm_rxdrain(struct wm_softc *sc)
2513 {
2514 struct wm_rxsoft *rxs;
2515 int i;
2516
2517 for (i = 0; i < WM_NRXDESC; i++) {
2518 rxs = &sc->sc_rxsoft[i];
2519 if (rxs->rxs_mbuf != NULL) {
2520 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2521 m_freem(rxs->rxs_mbuf);
2522 rxs->rxs_mbuf = NULL;
2523 }
2524 }
2525 }
2526
2527 /*
2528 * wm_stop: [ifnet interface function]
2529 *
2530 * Stop transmission on the interface.
2531 */
2532 static void
2533 wm_stop(struct ifnet *ifp, int disable)
2534 {
2535 struct wm_softc *sc = ifp->if_softc;
2536 struct wm_txsoft *txs;
2537 int i;
2538
2539 /* Stop the one second clock. */
2540 callout_stop(&sc->sc_tick_ch);
2541
2542 if (sc->sc_flags & WM_F_HAS_MII) {
2543 /* Down the MII. */
2544 mii_down(&sc->sc_mii);
2545 }
2546
2547 /* Stop the transmit and receive processes. */
2548 CSR_WRITE(sc, WMREG_TCTL, 0);
2549 CSR_WRITE(sc, WMREG_RCTL, 0);
2550
2551 /* Release any queued transmit buffers. */
2552 for (i = 0; i < WM_TXQUEUELEN; i++) {
2553 txs = &sc->sc_txsoft[i];
2554 if (txs->txs_mbuf != NULL) {
2555 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2556 m_freem(txs->txs_mbuf);
2557 txs->txs_mbuf = NULL;
2558 }
2559 }
2560
2561 if (disable)
2562 wm_rxdrain(sc);
2563
2564 /* Mark the interface as down and cancel the watchdog timer. */
2565 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2566 ifp->if_timer = 0;
2567 }
2568
2569 /*
2570 * wm_acquire_eeprom:
2571 *
2572 * Perform the EEPROM handshake required on some chips.
2573 */
2574 static int
2575 wm_acquire_eeprom(struct wm_softc *sc)
2576 {
2577 uint32_t reg;
2578 int x;
2579
2580 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2581 reg = CSR_READ(sc, WMREG_EECD);
2582
2583 /* Request EEPROM access. */
2584 reg |= EECD_EE_REQ;
2585 CSR_WRITE(sc, WMREG_EECD, reg);
2586
2587 		/* ... and wait for it to be granted. */
2588 for (x = 0; x < 100; x++) {
2589 reg = CSR_READ(sc, WMREG_EECD);
2590 if (reg & EECD_EE_GNT)
2591 break;
2592 delay(5);
2593 }
2594 if ((reg & EECD_EE_GNT) == 0) {
2595 aprint_error("%s: could not acquire EEPROM GNT\n",
2596 sc->sc_dev.dv_xname);
2597 reg &= ~EECD_EE_REQ;
2598 CSR_WRITE(sc, WMREG_EECD, reg);
2599 return (1);
2600 }
2601 }
2602
2603 return (0);
2604 }
2605
2606 /*
2607 * wm_release_eeprom:
2608 *
2609 * Release the EEPROM mutex.
2610 */
2611 static void
2612 wm_release_eeprom(struct wm_softc *sc)
2613 {
2614 uint32_t reg;
2615
2616 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2617 reg = CSR_READ(sc, WMREG_EECD);
2618 reg &= ~EECD_EE_REQ;
2619 CSR_WRITE(sc, WMREG_EECD, reg);
2620 }
2621 }
2622
2623 /*
2624 * wm_eeprom_sendbits:
2625 *
2626 * Send a series of bits to the EEPROM.
2627 */
2628 static void
2629 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2630 {
2631 uint32_t reg;
2632 int x;
2633
2634 reg = CSR_READ(sc, WMREG_EECD);
2635
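	/*
	 * For each bit: drive DI to the bit's value, pulse SK high and
	 * then low again; the EEPROM samples DI on SK's rising edge.
	 */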
2636 for (x = nbits; x > 0; x--) {
2637 if (bits & (1U << (x - 1)))
2638 reg |= EECD_DI;
2639 else
2640 reg &= ~EECD_DI;
2641 CSR_WRITE(sc, WMREG_EECD, reg);
2642 delay(2);
2643 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2644 delay(2);
2645 CSR_WRITE(sc, WMREG_EECD, reg);
2646 delay(2);
2647 }
2648 }
2649
2650 /*
2651 * wm_eeprom_recvbits:
2652 *
2653 * Receive a series of bits from the EEPROM.
2654 */
2655 static void
2656 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2657 {
2658 uint32_t reg, val;
2659 int x;
2660
2661 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2662
2663 val = 0;
2664 for (x = nbits; x > 0; x--) {
2665 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2666 delay(2);
2667 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2668 val |= (1U << (x - 1));
2669 CSR_WRITE(sc, WMREG_EECD, reg);
2670 delay(2);
2671 }
2672 *valp = val;
2673 }
2674
2675 /*
2676 * wm_read_eeprom_uwire:
2677 *
2678 * Read a word from the EEPROM using the MicroWire protocol.
2679 */
2680 static int
2681 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2682 {
2683 uint32_t reg, val;
2684 int i;
2685
2686 for (i = 0; i < wordcnt; i++) {
2687 /* Clear SK and DI. */
2688 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2689 CSR_WRITE(sc, WMREG_EECD, reg);
2690
2691 /* Set CHIP SELECT. */
2692 reg |= EECD_CS;
2693 CSR_WRITE(sc, WMREG_EECD, reg);
2694 delay(2);
2695
2696 /* Shift in the READ command. */
2697 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2698
2699 /* Shift in address. */
2700 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2701
2702 /* Shift out the data. */
2703 wm_eeprom_recvbits(sc, &val, 16);
2704 data[i] = val & 0xffff;
2705
2706 /* Clear CHIP SELECT. */
2707 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2708 CSR_WRITE(sc, WMREG_EECD, reg);
2709 delay(2);
2710 }
2711
2712 return (0);
2713 }
2714
2715 /*
2716 * wm_spi_eeprom_ready:
2717 *
2718 * Wait for a SPI EEPROM to be ready for commands.
2719 */
2720 static int
2721 wm_spi_eeprom_ready(struct wm_softc *sc)
2722 {
2723 uint32_t val;
2724 int usec;
2725
2726 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2727 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2728 wm_eeprom_recvbits(sc, &val, 8);
2729 if ((val & SPI_SR_RDY) == 0)
2730 break;
2731 }
2732 if (usec >= SPI_MAX_RETRIES) {
2733 aprint_error("%s: EEPROM failed to become ready\n",
2734 sc->sc_dev.dv_xname);
2735 return (1);
2736 }
2737 return (0);
2738 }
2739
2740 /*
2741 * wm_read_eeprom_spi:
2742 *
2743  * Read a word from the EEPROM using the SPI protocol.
2744 */
2745 static int
2746 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2747 {
2748 uint32_t reg, val;
2749 int i;
2750 uint8_t opc;
2751
2752 /* Clear SK and CS. */
2753 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2754 CSR_WRITE(sc, WMREG_EECD, reg);
2755 delay(2);
2756
2757 if (wm_spi_eeprom_ready(sc))
2758 return (1);
2759
2760 /* Toggle CS to flush commands. */
2761 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2762 delay(2);
2763 CSR_WRITE(sc, WMREG_EECD, reg);
2764 delay(2);
2765
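	/*
	 * Parts with only 8 address bits carry the 9th address bit
	 * (A8) in the opcode; the word offset is doubled into a byte
	 * address below.
	 */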
2766 opc = SPI_OPC_READ;
2767 if (sc->sc_ee_addrbits == 8 && word >= 128)
2768 opc |= SPI_OPC_A8;
2769
2770 wm_eeprom_sendbits(sc, opc, 8);
2771 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2772
2773 for (i = 0; i < wordcnt; i++) {
2774 wm_eeprom_recvbits(sc, &val, 16);
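		/*
		 * The bytes of each word arrive in ascending address
		 * order (low byte first); swap them into the 16-bit
		 * value.
		 */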
2775 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2776 }
2777
2778 /* Raise CS and clear SK. */
2779 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2780 CSR_WRITE(sc, WMREG_EECD, reg);
2781 delay(2);
2782
2783 return (0);
2784 }
2785
2786 /*
2787 * wm_read_eeprom:
2788 *
2789 * Read data from the serial EEPROM.
2790 */
2791 static int
2792 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2793 {
2794 int rv;
2795
2796 if (wm_acquire_eeprom(sc))
2797 return (1);
2798
2799 if (sc->sc_flags & WM_F_EEPROM_SPI)
2800 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2801 else
2802 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2803
2804 wm_release_eeprom(sc);
2805 return (rv);
2806 }
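
#if 0
/*
 * Illustrative sketch only (not compiled): reading a single word
 * from the serial EEPROM.  The offset 0 and the printf are merely
 * examples of how the interface above is meant to be used.
 */
static void
wm_eeprom_example(struct wm_softc *sc)
{
	uint16_t word;

	if (wm_read_eeprom(sc, 0, 1, &word) == 0)
		printf("%s: EEPROM word 0 = 0x%04x\n",
		    sc->sc_dev.dv_xname, word);
}
#endif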
2807
2808 /*
2809 * wm_add_rxbuf:
2810 *
2811  * Add a receive buffer to the indicated descriptor.
2812 */
2813 static int
2814 wm_add_rxbuf(struct wm_softc *sc, int idx)
2815 {
2816 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2817 struct mbuf *m;
2818 int error;
2819
2820 MGETHDR(m, M_DONTWAIT, MT_DATA);
2821 if (m == NULL)
2822 return (ENOBUFS);
2823
2824 MCLGET(m, M_DONTWAIT);
2825 if ((m->m_flags & M_EXT) == 0) {
2826 m_freem(m);
2827 return (ENOBUFS);
2828 }
2829
2830 if (rxs->rxs_mbuf != NULL)
2831 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2832
2833 rxs->rxs_mbuf = m;
2834
2835 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2836 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2837 BUS_DMA_READ|BUS_DMA_NOWAIT);
2838 if (error) {
2839 printf("%s: unable to load rx DMA map %d, error = %d\n",
2840 sc->sc_dev.dv_xname, idx, error);
2841 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2842 }
2843
2844 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2845 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2846
2847 WM_INIT_RXDESC(sc, idx);
2848
2849 return (0);
2850 }
2851
2852 /*
2853 * wm_set_ral:
2854 *
2855  * Set an entry in the receive address list.
2856 */
2857 static void
2858 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2859 {
2860 uint32_t ral_lo, ral_hi;
2861
2862 if (enaddr != NULL) {
2863 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2864 (enaddr[3] << 24);
2865 ral_hi = enaddr[4] | (enaddr[5] << 8);
2866 ral_hi |= RAL_AV;
2867 } else {
2868 ral_lo = 0;
2869 ral_hi = 0;
2870 }
2871
2872 if (sc->sc_type >= WM_T_82544) {
2873 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2874 ral_lo);
2875 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2876 ral_hi);
2877 } else {
2878 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2879 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2880 }
2881 }
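
/*
 * Worked example: for the station address 00:0e:0c:12:34:56 the code
 * above computes ral_lo = 0x120c0e00 and ral_hi = 0x5634 | RAL_AV.
 */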
2882
2883 /*
2884 * wm_mchash:
2885 *
2886 * Compute the hash of the multicast address for the 4096-bit
2887 * multicast filter.
2888 */
2889 static uint32_t
2890 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2891 {
2892 static const int lo_shift[4] = { 4, 3, 2, 0 };
2893 static const int hi_shift[4] = { 4, 5, 6, 8 };
2894 uint32_t hash;
2895
2896 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2897 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2898
2899 return (hash & 0xfff);
2900 }
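
/*
 * Worked example: for the all-hosts group 01:00:5e:00:00:01 with
 * mchash type 0 this yields (0x00 >> 4) | (0x01 << 4) = 0x010;
 * wm_set_filter() below turns that into bit 16 of MTA word 0.
 */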
2901
2902 /*
2903 * wm_set_filter:
2904 *
2905 * Set up the receive filter.
2906 */
2907 static void
2908 wm_set_filter(struct wm_softc *sc)
2909 {
2910 struct ethercom *ec = &sc->sc_ethercom;
2911 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2912 struct ether_multi *enm;
2913 struct ether_multistep step;
2914 bus_addr_t mta_reg;
2915 uint32_t hash, reg, bit;
2916 int i;
2917
2918 if (sc->sc_type >= WM_T_82544)
2919 mta_reg = WMREG_CORDOVA_MTA;
2920 else
2921 mta_reg = WMREG_MTA;
2922
2923 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2924
2925 if (ifp->if_flags & IFF_BROADCAST)
2926 sc->sc_rctl |= RCTL_BAM;
2927 if (ifp->if_flags & IFF_PROMISC) {
2928 sc->sc_rctl |= RCTL_UPE;
2929 goto allmulti;
2930 }
2931
2932 /*
2933 * Set the station address in the first RAL slot, and
2934 * clear the remaining slots.
2935 */
2936 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2937 for (i = 1; i < WM_RAL_TABSIZE; i++)
2938 wm_set_ral(sc, NULL, i);
2939
2940 /* Clear out the multicast table. */
2941 for (i = 0; i < WM_MC_TABSIZE; i++)
2942 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2943
2944 ETHER_FIRST_MULTI(step, ec, enm);
2945 while (enm != NULL) {
2946 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2947 /*
2948 * We must listen to a range of multicast addresses.
2949 * For now, just accept all multicasts, rather than
2950 * trying to set only those filter bits needed to match
2951 * the range. (At this time, the only use of address
2952 * ranges is for IP multicast routing, for which the
2953 * range is big enough to require all bits set.)
2954 */
2955 goto allmulti;
2956 }
2957
2958 hash = wm_mchash(sc, enm->enm_addrlo);
2959
2960 reg = (hash >> 5) & 0x7f;
2961 bit = hash & 0x1f;
2962
2963 hash = CSR_READ(sc, mta_reg + (reg << 2));
2964 hash |= 1U << bit;
2965
2966 /* XXX Hardware bug?? */
2967 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2968 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2969 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2970 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2971 } else
2972 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2973
2974 ETHER_NEXT_MULTI(step, enm);
2975 }
2976
2977 ifp->if_flags &= ~IFF_ALLMULTI;
2978 goto setit;
2979
2980 allmulti:
2981 ifp->if_flags |= IFF_ALLMULTI;
2982 sc->sc_rctl |= RCTL_MPE;
2983
2984 setit:
2985 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2986 }
2987
2988 /*
2989 * wm_tbi_mediainit:
2990 *
2991 * Initialize media for use on 1000BASE-X devices.
2992 */
2993 static void
2994 wm_tbi_mediainit(struct wm_softc *sc)
2995 {
2996 const char *sep = "";
2997
2998 if (sc->sc_type < WM_T_82543)
2999 sc->sc_tipg = TIPG_WM_DFLT;
3000 else
3001 sc->sc_tipg = TIPG_LG_DFLT;
3002
3003 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3004 wm_tbi_mediastatus);
3005
3006 /*
3007 * SWD Pins:
3008 *
3009 * 0 = Link LED (output)
3010 * 1 = Loss Of Signal (input)
3011 */
3012 sc->sc_ctrl |= CTRL_SWDPIO(0);
3013 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3014
3015 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3016
3017 #define ADD(ss, mm, dd) \
3018 do { \
3019 printf("%s%s", sep, ss); \
3020 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3021 sep = ", "; \
3022 } while (/*CONSTCOND*/0)
3023
3024 printf("%s: ", sc->sc_dev.dv_xname);
3025 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3026 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3027 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3028 printf("\n");
3029
3030 #undef ADD
3031
3032 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3033 }
3034
3035 /*
3036 * wm_tbi_mediastatus: [ifmedia interface function]
3037 *
3038 * Get the current interface media status on a 1000BASE-X device.
3039 */
3040 static void
3041 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3042 {
3043 struct wm_softc *sc = ifp->if_softc;
3044
3045 ifmr->ifm_status = IFM_AVALID;
3046 ifmr->ifm_active = IFM_ETHER;
3047
3048 if (sc->sc_tbi_linkup == 0) {
3049 ifmr->ifm_active |= IFM_NONE;
3050 return;
3051 }
3052
3053 ifmr->ifm_status |= IFM_ACTIVE;
3054 ifmr->ifm_active |= IFM_1000_SX;
3055 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3056 ifmr->ifm_active |= IFM_FDX;
3057 }
3058
3059 /*
3060 * wm_tbi_mediachange: [ifmedia interface function]
3061 *
3062 * Set hardware to newly-selected media on a 1000BASE-X device.
3063 */
3064 static int
3065 wm_tbi_mediachange(struct ifnet *ifp)
3066 {
3067 struct wm_softc *sc = ifp->if_softc;
3068 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3069 uint32_t status;
3070 int i;
3071
3072 sc->sc_txcw = ife->ifm_data;
3073 if (sc->sc_ctrl & CTRL_RFCE)
3074 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
3075 if (sc->sc_ctrl & CTRL_TFCE)
3076 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
3077 sc->sc_txcw |= TXCW_ANE;
3078
3079 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3080 delay(10000);
3081
3082 sc->sc_tbi_anstate = 0;
3083
3084 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3085 /* Have signal; wait for the link to come up. */
3086 for (i = 0; i < 50; i++) {
3087 delay(10000);
3088 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3089 break;
3090 }
3091
3092 status = CSR_READ(sc, WMREG_STATUS);
3093 if (status & STATUS_LU) {
3094 /* Link is up. */
3095 DPRINTF(WM_DEBUG_LINK,
3096 ("%s: LINK: set media -> link up %s\n",
3097 sc->sc_dev.dv_xname,
3098 (status & STATUS_FD) ? "FDX" : "HDX"));
3099 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3100 if (status & STATUS_FD)
3101 sc->sc_tctl |=
3102 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3103 else
3104 sc->sc_tctl |=
3105 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3106 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3107 sc->sc_tbi_linkup = 1;
3108 } else {
3109 /* Link is down. */
3110 DPRINTF(WM_DEBUG_LINK,
3111 ("%s: LINK: set media -> link down\n",
3112 sc->sc_dev.dv_xname));
3113 sc->sc_tbi_linkup = 0;
3114 }
3115 } else {
3116 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3117 sc->sc_dev.dv_xname));
3118 sc->sc_tbi_linkup = 0;
3119 }
3120
3121 wm_tbi_set_linkled(sc);
3122
3123 return (0);
3124 }
3125
3126 /*
3127 * wm_tbi_set_linkled:
3128 *
3129 * Update the link LED on 1000BASE-X devices.
3130 */
3131 static void
3132 wm_tbi_set_linkled(struct wm_softc *sc)
3133 {
3134
3135 if (sc->sc_tbi_linkup)
3136 sc->sc_ctrl |= CTRL_SWDPIN(0);
3137 else
3138 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3139
3140 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3141 }
3142
3143 /*
3144 * wm_tbi_check_link:
3145 *
3146 * Check the link on 1000BASE-X devices.
3147 */
3148 static void
3149 wm_tbi_check_link(struct wm_softc *sc)
3150 {
3151 uint32_t rxcw, ctrl, status;
3152
3153 if (sc->sc_tbi_anstate == 0)
3154 return;
3155 else if (sc->sc_tbi_anstate > 1) {
3156 DPRINTF(WM_DEBUG_LINK,
3157 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3158 sc->sc_tbi_anstate));
3159 sc->sc_tbi_anstate--;
3160 return;
3161 }
3162
3163 sc->sc_tbi_anstate = 0;
3164
3165 rxcw = CSR_READ(sc, WMREG_RXCW);
3166 ctrl = CSR_READ(sc, WMREG_CTRL);
3167 status = CSR_READ(sc, WMREG_STATUS);
3168
3169 if ((status & STATUS_LU) == 0) {
3170 DPRINTF(WM_DEBUG_LINK,
3171 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3172 sc->sc_tbi_linkup = 0;
3173 } else {
3174 DPRINTF(WM_DEBUG_LINK,
3175 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3176 (status & STATUS_FD) ? "FDX" : "HDX"));
3177 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3178 if (status & STATUS_FD)
3179 sc->sc_tctl |=
3180 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3181 else
3182 sc->sc_tctl |=
3183 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3184 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3185 sc->sc_tbi_linkup = 1;
3186 }
3187
3188 wm_tbi_set_linkled(sc);
3189 }
3190
3191 /*
3192 * wm_gmii_reset:
3193 *
3194 * Reset the PHY.
3195 */
3196 static void
3197 wm_gmii_reset(struct wm_softc *sc)
3198 {
3199 uint32_t reg;
3200
3201 if (sc->sc_type >= WM_T_82544) {
3202 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3203 delay(20000);
3204
3205 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3206 delay(20000);
3207 } else {
3208 /* The PHY reset pin is active-low. */
3209 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3210 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3211 CTRL_EXT_SWDPIN(4));
3212 reg |= CTRL_EXT_SWDPIO(4);
3213
3214 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3215 delay(10);
3216
3217 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3218 delay(10);
3219
3220 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3221 delay(10);
3222 #if 0
3223 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3224 #endif
3225 }
3226 }
3227
3228 /*
3229 * wm_gmii_mediainit:
3230 *
3231 * Initialize media for use on 1000BASE-T devices.
3232 */
3233 static void
3234 wm_gmii_mediainit(struct wm_softc *sc)
3235 {
3236 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3237
3238 /* We have MII. */
3239 sc->sc_flags |= WM_F_HAS_MII;
3240
3241 sc->sc_tipg = TIPG_1000T_DFLT;
3242
3243 /*
3244 * Let the chip set speed/duplex on its own based on
3245 * signals from the PHY.
3246 */
3247 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3248 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3249
3250 /* Initialize our media structures and probe the GMII. */
3251 sc->sc_mii.mii_ifp = ifp;
3252
3253 if (sc->sc_type >= WM_T_82544) {
3254 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3255 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3256 } else {
3257 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3258 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3259 }
3260 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3261
3262 wm_gmii_reset(sc);
3263
3264 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3265 wm_gmii_mediastatus);
3266
3267 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3268 MII_OFFSET_ANY, 0);
3269 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3270 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3271 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3272 } else
3273 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3274 }
3275
3276 /*
3277 * wm_gmii_mediastatus: [ifmedia interface function]
3278 *
3279 * Get the current interface media status on a 1000BASE-T device.
3280 */
3281 static void
3282 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3283 {
3284 struct wm_softc *sc = ifp->if_softc;
3285
3286 mii_pollstat(&sc->sc_mii);
3287 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3288 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3289 }
3290
3291 /*
3292 * wm_gmii_mediachange: [ifmedia interface function]
3293 *
3294 * Set hardware to newly-selected media on a 1000BASE-T device.
3295 */
3296 static int
3297 wm_gmii_mediachange(struct ifnet *ifp)
3298 {
3299 struct wm_softc *sc = ifp->if_softc;
3300
3301 if (ifp->if_flags & IFF_UP)
3302 mii_mediachg(&sc->sc_mii);
3303 return (0);
3304 }
3305
3306 #define MDI_IO CTRL_SWDPIN(2)
3307 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3308 #define MDI_CLK CTRL_SWDPIN(3)
3309
3310 static void
3311 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3312 {
3313 uint32_t i, v;
3314
3315 v = CSR_READ(sc, WMREG_CTRL);
3316 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3317 v |= MDI_DIR | CTRL_SWDPIO(3);
3318
3319 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3320 if (data & i)
3321 v |= MDI_IO;
3322 else
3323 v &= ~MDI_IO;
3324 CSR_WRITE(sc, WMREG_CTRL, v);
3325 delay(10);
3326 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3327 delay(10);
3328 CSR_WRITE(sc, WMREG_CTRL, v);
3329 delay(10);
3330 }
3331 }
3332
3333 static uint32_t
3334 i82543_mii_recvbits(struct wm_softc *sc)
3335 {
3336 uint32_t v, i, data = 0;
3337
3338 v = CSR_READ(sc, WMREG_CTRL);
3339 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3340 v |= CTRL_SWDPIO(3);
3341
3342 CSR_WRITE(sc, WMREG_CTRL, v);
3343 delay(10);
3344 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3345 delay(10);
3346 CSR_WRITE(sc, WMREG_CTRL, v);
3347 delay(10);
3348
3349 for (i = 0; i < 16; i++) {
3350 data <<= 1;
3351 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3352 delay(10);
3353 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3354 data |= 1;
3355 CSR_WRITE(sc, WMREG_CTRL, v);
3356 delay(10);
3357 }
3358
3359 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3360 delay(10);
3361 CSR_WRITE(sc, WMREG_CTRL, v);
3362 delay(10);
3363
3364 return (data);
3365 }
3366
3367 #undef MDI_IO
3368 #undef MDI_DIR
3369 #undef MDI_CLK
3370
3371 /*
3372 * wm_gmii_i82543_readreg: [mii interface function]
3373 *
3374 * Read a PHY register on the GMII (i82543 version).
3375 */
3376 static int
3377 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3378 {
3379 struct wm_softc *sc = (void *) self;
3380 int rv;
3381
3382 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3383 i82543_mii_sendbits(sc, reg | (phy << 5) |
3384 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3385 rv = i82543_mii_recvbits(sc) & 0xffff;
3386
3387 DPRINTF(WM_DEBUG_GMII,
3388 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3389 sc->sc_dev.dv_xname, phy, reg, rv));
3390
3391 return (rv);
3392 }
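
/*
 * The 14 bits shifted out above form the MII management frame header,
 * sent MSB first after a 32-bit preamble of all-ones:
 *
 *	[start:2][opcode:2][phy address:5][register address:5]
 */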
3393
3394 /*
3395 * wm_gmii_i82543_writereg: [mii interface function]
3396 *
3397 * Write a PHY register on the GMII (i82543 version).
3398 */
3399 static void
3400 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3401 {
3402 struct wm_softc *sc = (void *) self;
3403
3404 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3405 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3406 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3407 (MII_COMMAND_START << 30), 32);
3408 }
3409
3410 /*
3411 * wm_gmii_i82544_readreg: [mii interface function]
3412 *
3413 * Read a PHY register on the GMII.
3414 */
3415 static int
3416 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3417 {
3418 struct wm_softc *sc = (void *) self;
3419 uint32_t mdic = 0;
3420 int i, rv;
3421
3422 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3423 MDIC_REGADD(reg));
3424
3425 for (i = 0; i < 100; i++) {
3426 mdic = CSR_READ(sc, WMREG_MDIC);
3427 if (mdic & MDIC_READY)
3428 break;
3429 delay(10);
3430 }
3431
3432 if ((mdic & MDIC_READY) == 0) {
3433 printf("%s: MDIC read timed out: phy %d reg %d\n",
3434 sc->sc_dev.dv_xname, phy, reg);
3435 rv = 0;
3436 } else if (mdic & MDIC_E) {
3437 #if 0 /* This is normal if no PHY is present. */
3438 printf("%s: MDIC read error: phy %d reg %d\n",
3439 sc->sc_dev.dv_xname, phy, reg);
3440 #endif
3441 rv = 0;
3442 } else {
3443 rv = MDIC_DATA(mdic);
3444 if (rv == 0xffff)
3445 rv = 0;
3446 }
3447
3448 return (rv);
3449 }
3450
3451 /*
3452 * wm_gmii_i82544_writereg: [mii interface function]
3453 *
3454 * Write a PHY register on the GMII.
3455 */
3456 static void
3457 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3458 {
3459 struct wm_softc *sc = (void *) self;
3460 uint32_t mdic = 0;
3461 int i;
3462
3463 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3464 MDIC_REGADD(reg) | MDIC_DATA(val));
3465
3466 for (i = 0; i < 100; i++) {
3467 mdic = CSR_READ(sc, WMREG_MDIC);
3468 if (mdic & MDIC_READY)
3469 break;
3470 delay(10);
3471 }
3472
3473 if ((mdic & MDIC_READY) == 0)
3474 printf("%s: MDIC write timed out: phy %d reg %d\n",
3475 sc->sc_dev.dv_xname, phy, reg);
3476 else if (mdic & MDIC_E)
3477 printf("%s: MDIC write error: phy %d reg %d\n",
3478 sc->sc_dev.dv_xname, phy, reg);
3479 }
3480
3481 /*
3482 * wm_gmii_statchg: [mii interface function]
3483 *
3484 * Callback from MII layer when media changes.
3485 */
3486 static void
3487 wm_gmii_statchg(struct device *self)
3488 {
3489 struct wm_softc *sc = (void *) self;
3490
3491 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3492
3493 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3494 DPRINTF(WM_DEBUG_LINK,
3495 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3496 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3497 } else {
3498 DPRINTF(WM_DEBUG_LINK,
3499 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3500 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3501 }
3502
3503 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3504 }
3505