/*	$NetBSD: if_wm.c,v 1.17 2002/08/08 00:12:08 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix TCP/UDP checksums.
 *	  Status: Several successful transmissions with offloaded
 *	  checksums occur.  After several successful transmissions,
 *	  the chip goes catatonic.  The watchdog timer fires, which
 *	  resets the chip, and gets things moving again, until the
 *	  cycle repeats.
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
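
/*
 * Note: WM_NTXDESC and WM_TXQUEUELEN are powers of two, so the AND
 * with the mask wraps a ring index without a modulo or a branch;
 * e.g. WM_NEXTTX(255) == (255 + 1) & 0xff == 0.
 */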

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)	\
do {	\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;	\
	*(sc)->sc_rxtailp = NULL;	\
	(sc)->sc_rxlen = 0;	\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)	\
do {	\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);	\
	(sc)->sc_rxtailp = &(m)->m_next;	\
} while (/*CONSTCOND*/0)
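
/*
 * Note: sc_rxtailp always points at the m_next field of the last
 * mbuf in the chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends a buffer in O(1) without walking the
 * chain.
 */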

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)	\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
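
/*
 * Note: WM_CDTXADDR() yields the bus (DMA) address of Tx descriptor
 * x by adding its byte offset within struct wm_control_data to the
 * base address of the single control-data DMA segment, e.g.
 * WM_CDTXADDR(sc, 5) == sc_cddma + 5 * sizeof(wiseman_txdesc_t).
 */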

#define	WM_CDTXSYNC(sc, x, n, ops)	\
do {	\
	int __x, __n;	\
	\
	__x = (x);	\
	__n = (n);	\
	\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {	\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *	\
		    (WM_NTXDESC - __x), (ops));	\
		__n -= (WM_NTXDESC - __x);	\
		__x = 0;	\
	}	\
	\
	/* Now sync whatever is left. */	\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
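
/*
 * Note: for example, WM_CDTXSYNC(sc, 254, 4, ops) on the 256-entry
 * ring is split into two bus_dmamap_sync() calls, one covering
 * descriptors 254-255 and one covering descriptors 0-1.
 */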

#define	WM_CDRXSYNC(sc, x, ops)	\
do {	\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)	\
do {	\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];	\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];	\
	struct mbuf *__m = __rxs->rxs_mbuf;	\
	\
	/*	\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.	\
	 *	\
	 * XXX BRAINDAMAGE ALERT!	\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.	\
	 *	\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */	\
	__m->m_data = __m->m_ext.ext_buf + 2;	\
	\
	__rxd->wrx_addr.wa_low =	\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
	__rxd->wrx_addr.wa_high = 0;	\
	__rxd->wrx_len = 0;	\
	__rxd->wrx_cksum = 0;	\
	__rxd->wrx_status = 0;	\
	__rxd->wrx_errors = 0;	\
	__rxd->wrx_special = 0;	\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);	\
	\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));	\
} while (/*CONSTCOND*/0)
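
/*
 * Note: the CSR_WRITE() of the RDT (receive descriptor tail)
 * register at the end is what hands descriptor x back to the chip;
 * the descriptor fields must be cleared and synced to memory before
 * that write, which is why it comes last.
 */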

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
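
	/*
	 * Note: the EEPROM stores the MAC address as three 16-bit
	 * little-endian words, so e.g. myea[0] == 0x2211 unpacks to
	 * enaddr[0] == 0x11, enaddr[1] == 0x22.
	 */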

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
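
	/*
	 * Note: sc_txctx_ipcs and sc_txctx_tucs are seeded to
	 * 0xffffffff in wm_init(); that value serves as the
	 * "no cached context yet" sentinel that the __predict_true()
	 * tests below rely on.
	 */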

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
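		/*
		 * Note: e.g. a maximally-fragmented packet of
		 * WM_NTXSEGS == 16 segments therefore requires
		 * sc_txfree >= 18: 16 data descriptors plus the two
		 * reserved slots described above.
		 */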
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
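
/*
 * Note: the reset sequence above waits 10ms after asserting
 * CTRL_RST, then polls for the bit to self-clear for up to
 * 1000 * 20us == 20ms more before declaring failure.
 */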
1884
1885 /*
1886 * wm_init: [ifnet interface function]
1887 *
1888 * Initialize the interface. Must be called at splnet().
1889 */
1890 int
1891 wm_init(struct ifnet *ifp)
1892 {
1893 struct wm_softc *sc = ifp->if_softc;
1894 struct wm_rxsoft *rxs;
1895 int i, error = 0;
1896 uint32_t reg;
1897
1898 /* Cancel any pending I/O. */
1899 wm_stop(ifp, 0);
1900
1901 /* Reset the chip to a known state. */
1902 wm_reset(sc);
1903
1904 /* Initialize the transmit descriptor ring. */
1905 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1906 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1907 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1908 sc->sc_txfree = WM_NTXDESC;
1909 sc->sc_txnext = 0;
1910
1911 sc->sc_txctx_ipcs = 0xffffffff;
1912 sc->sc_txctx_tucs = 0xffffffff;
1913
1914 if (sc->sc_type < WM_T_82543) {
1915 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1916 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1917 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1918 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1919 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1920 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1921 } else {
1922 CSR_WRITE(sc, WMREG_TBDAH, 0);
1923 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1924 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1925 CSR_WRITE(sc, WMREG_TDH, 0);
1926 CSR_WRITE(sc, WMREG_TDT, 0);
1927 CSR_WRITE(sc, WMREG_TIDV, 128);
1928
1929 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1930 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1931 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1932 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1933 }
1934 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1935 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1936
1937 /* Initialize the transmit job descriptors. */
1938 for (i = 0; i < WM_TXQUEUELEN; i++)
1939 sc->sc_txsoft[i].txs_mbuf = NULL;
1940 sc->sc_txsfree = WM_TXQUEUELEN;
1941 sc->sc_txsnext = 0;
1942 sc->sc_txsdirty = 0;
1943
1944 /*
1945 * Initialize the receive descriptor and receive job
1946 * descriptor rings.
1947 */
1948 if (sc->sc_type < WM_T_82543) {
1949 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1950 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1951 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1952 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1953 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1954 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1955
1956 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1957 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1958 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1959 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1960 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1961 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1962 } else {
1963 CSR_WRITE(sc, WMREG_RDBAH, 0);
1964 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1965 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1966 CSR_WRITE(sc, WMREG_RDH, 0);
1967 CSR_WRITE(sc, WMREG_RDT, 0);
1968 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1969 }
1970 for (i = 0; i < WM_NRXDESC; i++) {
1971 rxs = &sc->sc_rxsoft[i];
1972 if (rxs->rxs_mbuf == NULL) {
1973 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1974 printf("%s: unable to allocate or map rx "
1975 "buffer %d, error = %d\n",
1976 sc->sc_dev.dv_xname, i, error);
1977 /*
1978 * XXX Should attempt to run with fewer receive
1979 * XXX buffers instead of just failing.
1980 */
1981 wm_rxdrain(sc);
1982 goto out;
1983 }
1984 } else
1985 WM_INIT_RXDESC(sc, i);
1986 }
1987 sc->sc_rxptr = 0;
1988 sc->sc_rxdiscard = 0;
1989 WM_RXCHAIN_RESET(sc);
1990
1991 /*
1992 * Clear out the VLAN table -- we don't use it (yet).
1993 */
1994 CSR_WRITE(sc, WMREG_VET, 0);
1995 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1996 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1997
1998 /*
1999 * Set up flow-control parameters.
2000 *
2001 * XXX Values could probably stand some tuning.
2002 */
2003 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
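		/*
		 * FCAL/FCAH/FCT tell the chip how to recognize incoming
		 * PAUSE frames: the 802.3x multicast address
		 * 01:80:c2:00:00:01 and the MAC control ethertype 0x8808.
		 */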
2004 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2005 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2006 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2007
2008 if (sc->sc_type < WM_T_82543) {
2009 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2010 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2011 } else {
2012 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2013 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2014 }
2015 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2016 }
2017
2018 #if 0 /* XXXJRT */
2019 /* Deal with VLAN enables. */
2020 if (sc->sc_ethercom.ec_nvlans != 0)
2021 sc->sc_ctrl |= CTRL_VME;
2022 else
2023 #endif /* XXXJRT */
2024 sc->sc_ctrl &= ~CTRL_VME;
2025
2026 /* Write the control registers. */
2027 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2028 #if 0
2029 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2030 #endif
2031
2032 /*
2033 * Set up checksum offload parameters.
2034 */
2035 reg = CSR_READ(sc, WMREG_RXCSUM);
2036 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2037 reg |= RXCSUM_IPOFL;
2038 else
2039 reg &= ~RXCSUM_IPOFL;
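	/*
	 * The chip's TCP/UDP receive checksum assist wants the IP header
	 * checksum enabled as well, so TUOFL drags IPOFL along with it;
	 * IPOFL is cleared again below only when neither capability was
	 * requested.
	 */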
2040 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2041 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2042 else {
2043 reg &= ~RXCSUM_TUOFL;
2044 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2045 reg &= ~RXCSUM_IPOFL;
2046 }
2047 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2048
2049 /*
2050 * Set up the interrupt registers.
2051 */
2052 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2053 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2054 ICR_RXO | ICR_RXT0;
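	/*
	 * Fibre (TBI) parts also take an interrupt when /C/ ordered sets
	 * (autonegotiation configuration words) arrive from the link
	 * partner.
	 */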
2055 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2056 sc->sc_icr |= ICR_RXCFG;
2057 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2058
2059 /* Set up the inter-packet gap. */
2060 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2061
2062 #if 0 /* XXXJRT */
2063 /* Set the VLAN ethernetype. */
2064 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2065 #endif
2066
2067 /*
2068 * Set up the transmit control register; we start out with
2069  *	a collision distance suitable for FDX, but update it when
2070 * we resolve the media type.
2071 */
2072 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2073 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2074 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2075
2076 /* Set the media. */
2077 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2078
2079 /*
2080 * Set up the receive control register; we actually program
2081 * the register when we set the receive filter. Use multicast
2082 * address offset type 0.
2083 *
2084 * Only the i82544 has the ability to strip the incoming
2085 * CRC, so we don't enable that feature.
2086 */
2087 sc->sc_mchash_type = 0;
2088 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2089 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2090
2091 /* Set the receive filter. */
2092 wm_set_filter(sc);
2093
2094 /* Start the one second link check clock. */
2095 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2096
2097 /* ...all done! */
2098 ifp->if_flags |= IFF_RUNNING;
2099 ifp->if_flags &= ~IFF_OACTIVE;
2100
2101 out:
2102 if (error)
2103 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2104 return (error);
2105 }
2106
2107 /*
2108 * wm_rxdrain:
2109 *
2110 * Drain the receive queue.
2111 */
2112 void
2113 wm_rxdrain(struct wm_softc *sc)
2114 {
2115 struct wm_rxsoft *rxs;
2116 int i;
2117
2118 for (i = 0; i < WM_NRXDESC; i++) {
2119 rxs = &sc->sc_rxsoft[i];
2120 if (rxs->rxs_mbuf != NULL) {
2121 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2122 m_freem(rxs->rxs_mbuf);
2123 rxs->rxs_mbuf = NULL;
2124 }
2125 }
2126 }
2127
2128 /*
2129 * wm_stop: [ifnet interface function]
2130 *
2131 * Stop transmission on the interface.
2132 */
2133 void
2134 wm_stop(struct ifnet *ifp, int disable)
2135 {
2136 struct wm_softc *sc = ifp->if_softc;
2137 struct wm_txsoft *txs;
2138 int i;
2139
2140 /* Stop the one second clock. */
2141 callout_stop(&sc->sc_tick_ch);
2142
2143 if (sc->sc_flags & WM_F_HAS_MII) {
2144 /* Down the MII. */
2145 mii_down(&sc->sc_mii);
2146 }
2147
2148 /* Stop the transmit and receive processes. */
2149 CSR_WRITE(sc, WMREG_TCTL, 0);
2150 CSR_WRITE(sc, WMREG_RCTL, 0);
2151
2152 /* Release any queued transmit buffers. */
2153 for (i = 0; i < WM_TXQUEUELEN; i++) {
2154 txs = &sc->sc_txsoft[i];
2155 if (txs->txs_mbuf != NULL) {
2156 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2157 m_freem(txs->txs_mbuf);
2158 txs->txs_mbuf = NULL;
2159 }
2160 }
2161
2162 if (disable)
2163 wm_rxdrain(sc);
2164
2165 /* Mark the interface as down and cancel the watchdog timer. */
2166 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2167 ifp->if_timer = 0;
2168 }
2169
2170 /*
2171 * wm_read_eeprom:
2172 *
2173 * Read data from the serial EEPROM.
2174 */
2175 void
2176 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2177 {
2178 uint32_t reg;
2179 int i, x, addrbits = 6;
2180
2181 for (i = 0; i < wordcnt; i++) {
2182 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2183 reg = CSR_READ(sc, WMREG_EECD);
2184
2185 /* Get number of address bits. */
2186 if (reg & EECD_EE_SIZE)
2187 addrbits = 8;
2188
2189 /* Request EEPROM access. */
2190 reg |= EECD_EE_REQ;
2191 CSR_WRITE(sc, WMREG_EECD, reg);
2192
2193 			/* ...and wait for it to be granted. */
2194 for (x = 0; x < 100; x++) {
2195 reg = CSR_READ(sc, WMREG_EECD);
2196 if (reg & EECD_EE_GNT)
2197 break;
2198 delay(5);
2199 }
2200 if ((reg & EECD_EE_GNT) == 0) {
2201 printf("%s: could not acquire EEPROM GNT\n",
2202 sc->sc_dev.dv_xname);
2203 				data[i] = 0xffff;
2204 reg &= ~EECD_EE_REQ;
2205 CSR_WRITE(sc, WMREG_EECD, reg);
2206 continue;
2207 }
2208 } else
2209 reg = 0;
2210
2211 /* Clear SK and DI. */
2212 reg &= ~(EECD_SK | EECD_DI);
2213 CSR_WRITE(sc, WMREG_EECD, reg);
2214
2215 /* Set CHIP SELECT. */
2216 reg |= EECD_CS;
2217 CSR_WRITE(sc, WMREG_EECD, reg);
2218 delay(2);
2219
2220 /* Shift in the READ command. */
2221 for (x = 3; x > 0; x--) {
2222 if (UWIRE_OPC_READ & (1 << (x - 1)))
2223 reg |= EECD_DI;
2224 else
2225 reg &= ~EECD_DI;
2226 CSR_WRITE(sc, WMREG_EECD, reg);
2227 delay(2);
2228 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2229 delay(2);
2230 CSR_WRITE(sc, WMREG_EECD, reg);
2231 delay(2);
2232 }
2233
2234 /* Shift in address. */
2235 for (x = addrbits; x > 0; x--) {
2236 if ((word + i) & (1 << (x - 1)))
2237 reg |= EECD_DI;
2238 else
2239 reg &= ~EECD_DI;
2240 CSR_WRITE(sc, WMREG_EECD, reg);
2241 delay(2);
2242 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2243 delay(2);
2244 CSR_WRITE(sc, WMREG_EECD, reg);
2245 delay(2);
2246 }
2247
2248 /* Shift out the data. */
2249 reg &= ~EECD_DI;
2250 data[i] = 0;
2251 for (x = 16; x > 0; x--) {
2252 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2253 delay(2);
2254 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2255 data[i] |= (1 << (x - 1));
2256 CSR_WRITE(sc, WMREG_EECD, reg);
2257 delay(2);
2258 }
2259
2260 /* Clear CHIP SELECT. */
2261 reg &= ~EECD_CS;
2262 CSR_WRITE(sc, WMREG_EECD, reg);
2263 delay(2);
2264
2265 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2266 /* Release the EEPROM. */
2267 reg &= ~EECD_EE_REQ;
2268 CSR_WRITE(sc, WMREG_EECD, reg);
2269 }
2270 }
2271 }
2272
2273 /*
2274 * wm_add_rxbuf:
2275 *
2276  *	Add a receive buffer to the indicated descriptor.
2277 */
2278 int
2279 wm_add_rxbuf(struct wm_softc *sc, int idx)
2280 {
2281 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2282 struct mbuf *m;
2283 int error;
2284
2285 MGETHDR(m, M_DONTWAIT, MT_DATA);
2286 if (m == NULL)
2287 return (ENOBUFS);
2288
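	/*
	 * Grab a cluster: the receiver is programmed for 2K buffers
	 * (RCTL_2k), which a standard mbuf cluster satisfies.
	 */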
2289 MCLGET(m, M_DONTWAIT);
2290 if ((m->m_flags & M_EXT) == 0) {
2291 m_freem(m);
2292 return (ENOBUFS);
2293 }
2294
2295 if (rxs->rxs_mbuf != NULL)
2296 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2297
2298 rxs->rxs_mbuf = m;
2299
2300 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2301 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2302 BUS_DMA_READ|BUS_DMA_NOWAIT);
2303 if (error) {
2304 printf("%s: unable to load rx DMA map %d, error = %d\n",
2305 sc->sc_dev.dv_xname, idx, error);
2306 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2307 }
2308
2309 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2310 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2311
2312 WM_INIT_RXDESC(sc, idx);
2313
2314 return (0);
2315 }
2316
2317 /*
2318 * wm_set_ral:
2319 *
2320  *	Set an entry in the receive address list.
2321 */
2322 static void
2323 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2324 {
2325 uint32_t ral_lo, ral_hi;
2326
2327 if (enaddr != NULL) {
2328 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2329 (enaddr[3] << 24);
2330 ral_hi = enaddr[4] | (enaddr[5] << 8);
2331 ral_hi |= RAL_AV;
2332 } else {
2333 ral_lo = 0;
2334 ral_hi = 0;
2335 }
2336
2337 if (sc->sc_type >= WM_T_82544) {
2338 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2339 ral_lo);
2340 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2341 ral_hi);
2342 } else {
2343 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2344 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2345 }
2346 }
2347
2348 /*
2349 * wm_mchash:
2350 *
2351 * Compute the hash of the multicast address for the 4096-bit
2352 * multicast filter.
2353 */
2354 static uint32_t
2355 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2356 {
2357 static const int lo_shift[4] = { 4, 3, 2, 0 };
2358 static const int hi_shift[4] = { 4, 5, 6, 8 };
2359 uint32_t hash;
2360
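	/*
	 * The hash is a 12-bit window taken from the high-order end of
	 * the destination address; sc_mchash_type (RCTL's MO field)
	 * selects the window.  Example, type 0: 01:00:5e:00:00:01
	 * hashes to (0x00 >> 4) | (0x01 << 4) = 0x010.
	 */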
2361 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2362 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2363
2364 return (hash & 0xfff);
2365 }
2366
2367 /*
2368 * wm_set_filter:
2369 *
2370 * Set up the receive filter.
2371 */
2372 void
2373 wm_set_filter(struct wm_softc *sc)
2374 {
2375 struct ethercom *ec = &sc->sc_ethercom;
2376 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2377 struct ether_multi *enm;
2378 struct ether_multistep step;
2379 bus_addr_t mta_reg;
2380 uint32_t hash, reg, bit;
2381 int i;
2382
2383 if (sc->sc_type >= WM_T_82544)
2384 mta_reg = WMREG_CORDOVA_MTA;
2385 else
2386 mta_reg = WMREG_MTA;
2387
2388 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2389
2390 if (ifp->if_flags & IFF_BROADCAST)
2391 sc->sc_rctl |= RCTL_BAM;
2392 if (ifp->if_flags & IFF_PROMISC) {
2393 sc->sc_rctl |= RCTL_UPE;
2394 goto allmulti;
2395 }
2396
2397 /*
2398 * Set the station address in the first RAL slot, and
2399 * clear the remaining slots.
2400 */
2401 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2402 for (i = 1; i < WM_RAL_TABSIZE; i++)
2403 wm_set_ral(sc, NULL, i);
2404
2405 /* Clear out the multicast table. */
2406 for (i = 0; i < WM_MC_TABSIZE; i++)
2407 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2408
2409 ETHER_FIRST_MULTI(step, ec, enm);
2410 while (enm != NULL) {
2411 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2412 /*
2413 * We must listen to a range of multicast addresses.
2414 * For now, just accept all multicasts, rather than
2415 * trying to set only those filter bits needed to match
2416 * the range. (At this time, the only use of address
2417 * ranges is for IP multicast routing, for which the
2418 * range is big enough to require all bits set.)
2419 */
2420 goto allmulti;
2421 }
2422
2423 hash = wm_mchash(sc, enm->enm_addrlo);
2424
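		/*
		 * The 4096-bit filter lives in 128 32-bit MTA registers;
		 * the upper 7 bits of the hash select the register and
		 * the low 5 bits the bit within it.
		 */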
2425 reg = (hash >> 5) & 0x7f;
2426 bit = hash & 0x1f;
2427
2428 hash = CSR_READ(sc, mta_reg + (reg << 2));
2429 hash |= 1U << bit;
2430
2431 		/* XXX i82544 errata: an odd MTA write must also rewrite reg - 1. */
2432 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
2433 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2434 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2435 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2436 } else
2437 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2438
2439 ETHER_NEXT_MULTI(step, enm);
2440 }
2441
2442 ifp->if_flags &= ~IFF_ALLMULTI;
2443 goto setit;
2444
2445 allmulti:
2446 ifp->if_flags |= IFF_ALLMULTI;
2447 sc->sc_rctl |= RCTL_MPE;
2448
2449 setit:
2450 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2451 }
2452
2453 /*
2454 * wm_tbi_mediainit:
2455 *
2456 * Initialize media for use on 1000BASE-X devices.
2457 */
2458 void
2459 wm_tbi_mediainit(struct wm_softc *sc)
2460 {
2461 const char *sep = "";
2462
2463 if (sc->sc_type < WM_T_82543)
2464 sc->sc_tipg = TIPG_WM_DFLT;
2465 else
2466 sc->sc_tipg = TIPG_LG_DFLT;
2467
2468 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2469 wm_tbi_mediastatus);
2470
2471 /*
2472 * SWD Pins:
2473 *
2474 * 0 = Link LED (output)
2475 * 1 = Loss Of Signal (input)
2476 */
2477 sc->sc_ctrl |= CTRL_SWDPIO(0);
2478 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2479
2480 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2481
2482 #define ADD(s, m, d) \
2483 do { \
2484 printf("%s%s", sep, s); \
2485 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2486 sep = ", "; \
2487 } while (/*CONSTCOND*/0)
2488
2489 printf("%s: ", sc->sc_dev.dv_xname);
2490 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2491 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2492 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2493 printf("\n");
2494
2495 #undef ADD
2496
2497 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2498 }
2499
2500 /*
2501 * wm_tbi_mediastatus: [ifmedia interface function]
2502 *
2503 * Get the current interface media status on a 1000BASE-X device.
2504 */
2505 void
2506 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2507 {
2508 struct wm_softc *sc = ifp->if_softc;
2509
2510 ifmr->ifm_status = IFM_AVALID;
2511 ifmr->ifm_active = IFM_ETHER;
2512
2513 if (sc->sc_tbi_linkup == 0) {
2514 ifmr->ifm_active |= IFM_NONE;
2515 return;
2516 }
2517
2518 ifmr->ifm_status |= IFM_ACTIVE;
2519 ifmr->ifm_active |= IFM_1000_SX;
2520 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2521 ifmr->ifm_active |= IFM_FDX;
2522 }
2523
2524 /*
2525 * wm_tbi_mediachange: [ifmedia interface function]
2526 *
2527 * Set hardware to newly-selected media on a 1000BASE-X device.
2528 */
2529 int
2530 wm_tbi_mediachange(struct ifnet *ifp)
2531 {
2532 struct wm_softc *sc = ifp->if_softc;
2533 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2534 uint32_t status;
2535 int i;
2536
2537 sc->sc_txcw = ife->ifm_data;
2538 if (sc->sc_ctrl & CTRL_RFCE)
2539 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2540 if (sc->sc_ctrl & CTRL_TFCE)
2541 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2542 sc->sc_txcw |= TXCW_ANE;
2543
2544 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2545 delay(10000);
2546
2547 sc->sc_tbi_anstate = 0;
2548
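	/*
	 * SWDPIN 1 is the Loss Of Signal input from the optics; clear
	 * means we have signal.
	 */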
2549 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2550 /* Have signal; wait for the link to come up. */
2551 for (i = 0; i < 50; i++) {
2552 delay(10000);
2553 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2554 break;
2555 }
2556
2557 status = CSR_READ(sc, WMREG_STATUS);
2558 if (status & STATUS_LU) {
2559 /* Link is up. */
2560 DPRINTF(WM_DEBUG_LINK,
2561 ("%s: LINK: set media -> link up %s\n",
2562 sc->sc_dev.dv_xname,
2563 (status & STATUS_FD) ? "FDX" : "HDX"));
2564 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2565 if (status & STATUS_FD)
2566 sc->sc_tctl |=
2567 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2568 else
2569 sc->sc_tctl |=
2570 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2571 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2572 sc->sc_tbi_linkup = 1;
2573 } else {
2574 /* Link is down. */
2575 DPRINTF(WM_DEBUG_LINK,
2576 ("%s: LINK: set media -> link down\n",
2577 sc->sc_dev.dv_xname));
2578 sc->sc_tbi_linkup = 0;
2579 }
2580 } else {
2581 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2582 sc->sc_dev.dv_xname));
2583 sc->sc_tbi_linkup = 0;
2584 }
2585
2586 wm_tbi_set_linkled(sc);
2587
2588 return (0);
2589 }
2590
2591 /*
2592 * wm_tbi_set_linkled:
2593 *
2594 * Update the link LED on 1000BASE-X devices.
2595 */
2596 void
2597 wm_tbi_set_linkled(struct wm_softc *sc)
2598 {
2599
2600 if (sc->sc_tbi_linkup)
2601 sc->sc_ctrl |= CTRL_SWDPIN(0);
2602 else
2603 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2604
2605 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2606 }
2607
2608 /*
2609 * wm_tbi_check_link:
2610 *
2611 * Check the link on 1000BASE-X devices.
2612 */
2613 void
2614 wm_tbi_check_link(struct wm_softc *sc)
2615 {
2616 uint32_t rxcw, ctrl, status;
2617
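	/*
	 * sc_tbi_anstate paces the link check: 0 means no link event is
	 * pending, and values above 1 (armed by link-status and RXCFG
	 * interrupts) count down once per tick, giving autonegotiation
	 * a couple of seconds to settle before we sample the link.
	 */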
2618 if (sc->sc_tbi_anstate == 0)
2619 return;
2620 else if (sc->sc_tbi_anstate > 1) {
2621 DPRINTF(WM_DEBUG_LINK,
2622 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2623 sc->sc_tbi_anstate));
2624 sc->sc_tbi_anstate--;
2625 return;
2626 }
2627
2628 sc->sc_tbi_anstate = 0;
2629
2630 rxcw = CSR_READ(sc, WMREG_RXCW);
2631 ctrl = CSR_READ(sc, WMREG_CTRL);
2632 status = CSR_READ(sc, WMREG_STATUS);
2633
2634 if ((status & STATUS_LU) == 0) {
2635 DPRINTF(WM_DEBUG_LINK,
2636 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2637 sc->sc_tbi_linkup = 0;
2638 } else {
2639 DPRINTF(WM_DEBUG_LINK,
2640 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2641 (status & STATUS_FD) ? "FDX" : "HDX"));
2642 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2643 if (status & STATUS_FD)
2644 sc->sc_tctl |=
2645 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2646 else
2647 sc->sc_tctl |=
2648 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2649 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2650 sc->sc_tbi_linkup = 1;
2651 }
2652
2653 wm_tbi_set_linkled(sc);
2654 }
2655
2656 /*
2657 * wm_gmii_reset:
2658 *
2659 * Reset the PHY.
2660 */
2661 void
2662 wm_gmii_reset(struct wm_softc *sc)
2663 {
2664 uint32_t reg;
2665
2666 if (sc->sc_type >= WM_T_82544) {
2667 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2668 delay(20000);
2669
2670 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2671 delay(20000);
2672 } else {
2673 /* The PHY reset pin is active-low. */
2674 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2675 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2676 CTRL_EXT_SWDPIN(4));
2677 reg |= CTRL_EXT_SWDPIO(4);
2678
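		/* Pulse the (active-low) pin: deassert, assert, deassert. */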
2679 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2680 delay(10);
2681
2682 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2683 delay(10);
2684
2685 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2686 delay(10);
2687 #if 0
2688 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2689 #endif
2690 }
2691 }
2692
2693 /*
2694 * wm_gmii_mediainit:
2695 *
2696 * Initialize media for use on 1000BASE-T devices.
2697 */
2698 void
2699 wm_gmii_mediainit(struct wm_softc *sc)
2700 {
2701 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2702
2703 /* We have MII. */
2704 sc->sc_flags |= WM_F_HAS_MII;
2705
2706 sc->sc_tipg = TIPG_1000T_DFLT;
2707
2708 /*
2709 * Let the chip set speed/duplex on its own based on
2710 * signals from the PHY.
2711 */
2712 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2713 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2714
2715 /* Initialize our media structures and probe the GMII. */
2716 sc->sc_mii.mii_ifp = ifp;
2717
2718 if (sc->sc_type >= WM_T_82544) {
2719 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2720 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2721 } else {
2722 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2723 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2724 }
2725 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2726
2727 wm_gmii_reset(sc);
2728
2729 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2730 wm_gmii_mediastatus);
2731
2732 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2733 MII_OFFSET_ANY, 0);
2734 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2735 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2736 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2737 } else
2738 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2739 }
2740
2741 /*
2742 * wm_gmii_mediastatus: [ifmedia interface function]
2743 *
2744 * Get the current interface media status on a 1000BASE-T device.
2745 */
2746 void
2747 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2748 {
2749 struct wm_softc *sc = ifp->if_softc;
2750
2751 mii_pollstat(&sc->sc_mii);
2752 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2753 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2754 }
2755
2756 /*
2757 * wm_gmii_mediachange: [ifmedia interface function]
2758 *
2759 * Set hardware to newly-selected media on a 1000BASE-T device.
2760 */
2761 int
2762 wm_gmii_mediachange(struct ifnet *ifp)
2763 {
2764 struct wm_softc *sc = ifp->if_softc;
2765
2766 if (ifp->if_flags & IFF_UP)
2767 mii_mediachg(&sc->sc_mii);
2768 return (0);
2769 }
2770
2771 #define MDI_IO CTRL_SWDPIN(2)
2772 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2773 #define MDI_CLK CTRL_SWDPIN(3)
2774
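/*
 * i82543_mii_sendbits:
 *
 *	Clock out the low `nbits' bits of `data', MSB first, on the
 *	bit-banged MDIO interface; software-driven pin 2 carries the
 *	data and pin 3 the clock.
 */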
2775 static void
2776 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2777 {
2778 uint32_t i, v;
2779
2780 v = CSR_READ(sc, WMREG_CTRL);
2781 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2782 v |= MDI_DIR | CTRL_SWDPIO(3);
2783
2784 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2785 if (data & i)
2786 v |= MDI_IO;
2787 else
2788 v &= ~MDI_IO;
2789 CSR_WRITE(sc, WMREG_CTRL, v);
2790 delay(10);
2791 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2792 delay(10);
2793 CSR_WRITE(sc, WMREG_CTRL, v);
2794 delay(10);
2795 }
2796 }
2797
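/*
 * i82543_mii_recvbits:
 *
 *	Turn the bit-banged MDIO data line around and clock in the
 *	16-bit result, MSB first, from the PHY.
 */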
2798 static uint32_t
2799 i82543_mii_recvbits(struct wm_softc *sc)
2800 {
2801 uint32_t v, i, data = 0;
2802
2803 v = CSR_READ(sc, WMREG_CTRL);
2804 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2805 v |= CTRL_SWDPIO(3);
2806
2807 CSR_WRITE(sc, WMREG_CTRL, v);
2808 delay(10);
2809 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2810 delay(10);
2811 CSR_WRITE(sc, WMREG_CTRL, v);
2812 delay(10);
2813
2814 for (i = 0; i < 16; i++) {
2815 data <<= 1;
2816 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2817 delay(10);
2818 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2819 data |= 1;
2820 CSR_WRITE(sc, WMREG_CTRL, v);
2821 delay(10);
2822 }
2823
2824 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2825 delay(10);
2826 CSR_WRITE(sc, WMREG_CTRL, v);
2827 delay(10);
2828
2829 return (data);
2830 }
2831
2832 #undef MDI_IO
2833 #undef MDI_DIR
2834 #undef MDI_CLK
2835
2836 /*
2837 * wm_gmii_i82543_readreg: [mii interface function]
2838 *
2839 * Read a PHY register on the GMII (i82543 version).
2840 */
2841 int
2842 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2843 {
2844 struct wm_softc *sc = (void *) self;
2845 int rv;
2846
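	/*
	 * 32 bits of preamble, then the 14-bit read frame: start,
	 * opcode, PHY and register addresses.
	 */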
2847 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2848 i82543_mii_sendbits(sc, reg | (phy << 5) |
2849 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2850 rv = i82543_mii_recvbits(sc) & 0xffff;
2851
2852 DPRINTF(WM_DEBUG_GMII,
2853 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2854 sc->sc_dev.dv_xname, phy, reg, rv));
2855
2856 return (rv);
2857 }
2858
2859 /*
2860 * wm_gmii_i82543_writereg: [mii interface function]
2861 *
2862 * Write a PHY register on the GMII (i82543 version).
2863 */
2864 void
2865 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2866 {
2867 struct wm_softc *sc = (void *) self;
2868
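	/*
	 * Preamble, then a single 32-bit write frame: start, opcode,
	 * PHY and register addresses, turnaround, and the data.
	 */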
2869 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2870 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2871 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2872 (MII_COMMAND_START << 30), 32);
2873 }
2874
2875 /*
2876 * wm_gmii_i82544_readreg: [mii interface function]
2877 *
2878 * Read a PHY register on the GMII.
2879 */
2880 int
2881 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2882 {
2883 struct wm_softc *sc = (void *) self;
2884 uint32_t mdic;
2885 int i, rv;
2886
2887 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2888 MDIC_REGADD(reg));
2889
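	/* Wait up to 100 * 10us = 1ms for the access to complete. */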
2890 for (i = 0; i < 100; i++) {
2891 mdic = CSR_READ(sc, WMREG_MDIC);
2892 if (mdic & MDIC_READY)
2893 break;
2894 delay(10);
2895 }
2896
2897 if ((mdic & MDIC_READY) == 0) {
2898 printf("%s: MDIC read timed out: phy %d reg %d\n",
2899 sc->sc_dev.dv_xname, phy, reg);
2900 rv = 0;
2901 } else if (mdic & MDIC_E) {
2902 #if 0 /* This is normal if no PHY is present. */
2903 printf("%s: MDIC read error: phy %d reg %d\n",
2904 sc->sc_dev.dv_xname, phy, reg);
2905 #endif
2906 rv = 0;
2907 } else {
2908 rv = MDIC_DATA(mdic);
2909 if (rv == 0xffff)
2910 rv = 0;
2911 }
2912
2913 return (rv);
2914 }
2915
2916 /*
2917 * wm_gmii_i82544_writereg: [mii interface function]
2918 *
2919 * Write a PHY register on the GMII.
2920 */
2921 void
2922 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2923 {
2924 struct wm_softc *sc = (void *) self;
2925 uint32_t mdic;
2926 int i;
2927
2928 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2929 MDIC_REGADD(reg) | MDIC_DATA(val));
2930
2931 for (i = 0; i < 100; i++) {
2932 mdic = CSR_READ(sc, WMREG_MDIC);
2933 if (mdic & MDIC_READY)
2934 break;
2935 delay(10);
2936 }
2937
2938 if ((mdic & MDIC_READY) == 0)
2939 printf("%s: MDIC write timed out: phy %d reg %d\n",
2940 sc->sc_dev.dv_xname, phy, reg);
2941 else if (mdic & MDIC_E)
2942 printf("%s: MDIC write error: phy %d reg %d\n",
2943 sc->sc_dev.dv_xname, phy, reg);
2944 }
2945
2946 /*
2947 * wm_gmii_statchg: [mii interface function]
2948 *
2949 * Callback from MII layer when media changes.
2950 */
2951 void
2952 wm_gmii_statchg(struct device *self)
2953 {
2954 struct wm_softc *sc = (void *) self;
2955
2956 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2957
2958 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2959 DPRINTF(WM_DEBUG_LINK,
2960 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2961 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2962 } else {
2963 DPRINTF(WM_DEBUG_LINK,
2964 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2965 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2966 }
2967
2968 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2969 }
2970