/*	$NetBSD: if_wm.c,v 1.18 2002/08/15 18:29:02 briggs Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix TCP/UDP checksums.
 *	  Status: Several successful transmissions with offloaded
 *	  checksums occur.  After several successful transmissions,
 *	  the chip goes catatonic.  The watchdog timer fires, which
 *	  resets the chip, and gets things moving again, until the
 *	  cycle repeats.
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
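
/*
 * The ring and job-queue sizes are powers of two, so advancing an
 * index is a single AND with the corresponding mask; e.g.
 * WM_NEXTTX(255) == 0 wraps back to the start of the Tx ring.
 */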

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

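/*
 * The Rx chain is built as a singly-linked mbuf list with a pointer
 * to the tail's m_next field (sc_rxtailp), so WM_RXCHAIN_LINK() can
 * append a buffer to a multi-buffer packet in constant time.
 */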
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
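
/*
 * Example: WM_CDTXSYNC(sc, 250, 10, ops) first syncs descriptors
 * 250..255 (up to the end of the ring), then wraps and syncs
 * descriptors 0..3 in the second bus_dmamap_sync() call.
 */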

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't accept packets longer than the	\
	 * standard Ethernet MTU, without incurring a big penalty to	\
	 * copy every incoming packet to a new, suitably aligned	\
	 * buffer.							\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
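
/*
 * Note that WM_INIT_RXDESC() finishes by writing the descriptor index
 * to the chip's RDT (Receive Descriptor Tail) register, which hands
 * the freshly initialized descriptor back to the hardware.
 */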

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
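	/*
	 * Each 16-bit EEPROM word carries two bytes of the station
	 * address, low byte first, so the three words are unpacked
	 * below in little-endian order.
	 */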
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

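		/*
		 * Request an interrupt-delayed Tx completion (IDE);
		 * together with the TIDV value programmed in wm_init(),
		 * this lets the chip coalesce Tx-done interrupts
		 * instead of raising one per packet.
		 */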
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

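	/*
	 * Assert a full device reset; the initial delay presumably
	 * gives the post-reset EEPROM autoload time to get underway
	 * before we start polling for RST to clear.
	 */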
	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

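	/*
	 * An all-ones value marks the Tx checksum context cache as
	 * empty; wm_tx_cksum() checks for this sentinel before
	 * reusing a cached context descriptor.
	 */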
1912 sc->sc_txctx_ipcs = 0xffffffff;
1913 sc->sc_txctx_tucs = 0xffffffff;
1914
1915 if (sc->sc_type < WM_T_82543) {
1916 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1917 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1918 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1919 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1920 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1921 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1922 } else {
1923 CSR_WRITE(sc, WMREG_TBDAH, 0);
1924 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1925 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1926 CSR_WRITE(sc, WMREG_TDH, 0);
1927 CSR_WRITE(sc, WMREG_TDT, 0);
1928 CSR_WRITE(sc, WMREG_TIDV, 128);
1929
		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
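	/*
	 * On TBI (fiber) parts, also interrupt when /C/ ordered
	 * sets are received, so autonegotiation progress can be
	 * tracked.
	 */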
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		wm_rxdrain(sc);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x, addrbits = 6;

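	/*
	 * The EEPROM is a Microwire-style serial device accessed by
	 * bit-banging the EECD register: for each word, shift out the
	 * read opcode and word address, then clock in 16 data bits.
	 */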
	for (i = 0; i < wordcnt; i++) {
		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			reg = CSR_READ(sc, WMREG_EECD);

			/* Get number of address bits. */
			if (reg & EECD_EE_SIZE)
				addrbits = 8;

			/* Request EEPROM access. */
			reg |= EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);

			/* ...and wait for it to be granted. */
			for (x = 0; x < 100; x++) {
				reg = CSR_READ(sc, WMREG_EECD);
				if (reg & EECD_EE_GNT)
					break;
				delay(5);
			}
			if ((reg & EECD_EE_GNT) == 0) {
				printf("%s: could not acquire EEPROM GNT\n",
				    sc->sc_dev.dv_xname);
				*data = 0xffff;
				reg &= ~EECD_EE_REQ;
				CSR_WRITE(sc, WMREG_EECD, reg);
				continue;
			}
		} else
			reg = 0;

		/* Clear SK and DI. */
		reg &= ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		for (x = 3; x > 0; x--) {
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address. */
		for (x = addrbits; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift out the data. */
		reg &= ~EECD_DI;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		reg &= ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			/* Release the EEPROM. */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
		}
	}
}

/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

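	/*
	 * Get a packet header mbuf and attach a cluster to it;
	 * the entire cluster is handed to the chip as the
	 * receive buffer.
	 */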
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");		/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

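	/*
	 * Pack the Ethernet address into the two RAL words,
	 * low-order bytes first, and mark the entry valid.
	 * A NULL address clears the slot.
	 */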
	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}

/*
 * wm_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

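	/*
	 * The hash is a 12-bit window over the last two bytes of
	 * the address; sc_mchash_type (the multicast offset type
	 * programmed into RCTL) selects which window is used.
	 */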
	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}

/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/*
		 * XXX Hardware bug?  The i82544 appears to mishandle
		 * XXX writes to odd-numbered MTA entries; rewriting the
		 * XXX previous entry after updating the target makes
		 * XXX the update stick.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(s, m, d)							\
do {									\
	printf("%s%s", sep, s);						\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
}

/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

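	/*
	 * Program the transmit configuration word with the abilities
	 * advertised for the selected media, the pause bits implied
	 * by the flow-control settings, and the autonegotiation
	 * enable bit.
	 */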
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}

/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

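	/*
	 * sc_tbi_anstate throttles link evaluation around
	 * autonegotiation events: 0 means a media change is still
	 * in progress, and the link interrupt handlers set it to 2
	 * so that we skip a tick before re-checking the link.
	 */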
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}

/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

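/*
 * On the i82543, the MII management interface is bit-banged through
 * the software-definable pins: SWDPIN(2) carries the MDIO data and
 * SWDPIN(3) the MDC clock.
 */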
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

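	/* Clock the bits out MSB-first, pulsing MDC for each one. */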
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

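	/*
	 * Clock through the turnaround cycle, then shift in 16 data
	 * bits, sampling MDIO while the clock is high.
	 */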
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

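	/*
	 * Send the 32 bits of preamble, then the start, opcode,
	 * PHY address, and register address fields of the MII
	 * management frame, and clock the 16 data bits back in.
	 */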
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

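	/* Poll until the chip sets the READY bit in MDIC. */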
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;

	sc->sc_tctl &= ~TCTL_COLD(0x3ff);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
}
