/*	$NetBSD: if_wm.c,v 1.35 2003/04/15 21:12:24 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
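
/*
 * Note that both ring sizes are powers of two, so the NEXT macros
 * above can wrap an index around the ring with a cheap AND of the
 * corresponding mask rather than a modulus.
 */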

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
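
/*
 * These offsets locate a single descriptor inside the control data
 * block, which lets us bus_dmamap_sync() just the descriptors we
 * have actually touched rather than the whole structure.
 */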

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding rest of current packet */
	int sc_rxlen;			/* bytes received so far this packet */
	struct mbuf *sc_rxhead;		/* head of current Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of current Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
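
/*
 * The Rx chain is kept with a pointer-to-tail-pointer: sc_rxtailp
 * always points at the location (sc_rxhead, or some mbuf's m_next
 * field) where the next buffer should be linked, so appending to
 * the chain is O(1).
 */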

/* sc_type */
#define	WM_T_82542_2_0	0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1	1	/* i82542 2.1+ (old) */
#define	WM_T_82543	2	/* i82543 */
#define	WM_T_82544	3	/* i82544 */
#define	WM_T_82540	4	/* i82540 */
#define	WM_T_82545	5	/* i82545 */
#define	WM_T_82546	6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}
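
		/*
		 * Ask for a delayed interrupt (IDE) on every packet;
		 * completed jobs are then swept up in batches by
		 * wm_txintr() instead of one interrupt per packet.
		 */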
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

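	/*
	 * Reading ICR acknowledges (clears) the interrupt causes it
	 * reports, so keep reading until no cause we care about is
	 * still asserted.
	 */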
	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

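		/*
		 * If an earlier buffer allocation failed in the middle
		 * of a packet, keep dumping descriptors until we see
		 * EOP, then resume normal reception.
		 */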
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

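	/*
	 * CTRL_RST is self-clearing; poll until the chip indicates
	 * that its internal reset has completed.
	 */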
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

1964 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1965 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1966 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1967 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1968 }
1969 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1970 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1971
1972 /* Initialize the transmit job descriptors. */
1973 for (i = 0; i < WM_TXQUEUELEN; i++)
1974 sc->sc_txsoft[i].txs_mbuf = NULL;
1975 sc->sc_txsfree = WM_TXQUEUELEN;
1976 sc->sc_txsnext = 0;
1977 sc->sc_txsdirty = 0;
1978
1979 /*
1980 * Initialize the receive descriptor and receive job
1981 * descriptor rings.
1982 */
1983 if (sc->sc_type < WM_T_82543) {
1984 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1985 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1986 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1987 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1988 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1989 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1990
1991 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1992 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1993 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1994 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1995 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1996 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1997 } else {
1998 CSR_WRITE(sc, WMREG_RDBAH, 0);
1999 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2000 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2001 CSR_WRITE(sc, WMREG_RDH, 0);
2002 CSR_WRITE(sc, WMREG_RDT, 0);
2003 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2004 }
2005 for (i = 0; i < WM_NRXDESC; i++) {
2006 rxs = &sc->sc_rxsoft[i];
2007 if (rxs->rxs_mbuf == NULL) {
2008 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2009 printf("%s: unable to allocate or map rx "
2010 "buffer %d, error = %d\n",
2011 sc->sc_dev.dv_xname, i, error);
2012 /*
2013 * XXX Should attempt to run with fewer receive
2014 * XXX buffers instead of just failing.
2015 */
2016 wm_rxdrain(sc);
2017 goto out;
2018 }
2019 } else
2020 WM_INIT_RXDESC(sc, i);
2021 }
2022 sc->sc_rxptr = 0;
2023 sc->sc_rxdiscard = 0;
2024 WM_RXCHAIN_RESET(sc);
2025
2026 /*
2027 * Clear out the VLAN table -- we don't use it (yet).
2028 */
2029 CSR_WRITE(sc, WMREG_VET, 0);
2030 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2031 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2032
2033 /*
2034 * Set up flow-control parameters.
2035 *
2036 * XXX Values could probably stand some tuning.
2037 */
2038 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2039 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2040 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2041 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2042
2043 if (sc->sc_type < WM_T_82543) {
2044 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2045 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2046 } else {
2047 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2048 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2049 }
2050 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2051 }
2052
2053 #if 0 /* XXXJRT */
2054 /* Deal with VLAN enables. */
2055 if (sc->sc_ethercom.ec_nvlans != 0)
2056 sc->sc_ctrl |= CTRL_VME;
2057 else
2058 #endif /* XXXJRT */
2059 sc->sc_ctrl &= ~CTRL_VME;
2060
2061 /* Write the control registers. */
2062 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2063 #if 0
2064 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2065 #endif
2066
2067 /*
2068 * Set up checksum offload parameters.
2069 */
2070 reg = CSR_READ(sc, WMREG_RXCSUM);
2071 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2072 reg |= RXCSUM_IPOFL;
2073 else
2074 reg &= ~RXCSUM_IPOFL;
2075 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2076 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2077 else {
2078 reg &= ~RXCSUM_TUOFL;
2079 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2080 reg &= ~RXCSUM_IPOFL;
2081 }
2082 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2083
2084 /*
2085 * Set up the interrupt registers.
2086 */
2087 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2088 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2089 ICR_RXO | ICR_RXT0;
2090 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2091 sc->sc_icr |= ICR_RXCFG;
2092 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2093
2094 /* Set up the inter-packet gap. */
2095 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2096
2097 #if 0 /* XXXJRT */
2098 /* Set the VLAN ethernetype. */
2099 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2100 #endif
2101
2102 /*
2103 * Set up the transmit control register; we start out with
2104 	 * a collision distance suitable for FDX, but update it when
2105 * we resolve the media type.
2106 */
2107 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2108 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2109 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2110
2111 /* Set the media. */
2112 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2113
2114 /*
2115 * Set up the receive control register; we actually program
2116 * the register when we set the receive filter. Use multicast
2117 * address offset type 0.
2118 *
2119 * Only the i82544 has the ability to strip the incoming
2120 * CRC, so we don't enable that feature.
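 	 *
 	 * RCTL_2k selects 2K receive buffers, sized to the mbuf
 	 * clusters that wm_add_rxbuf() maps for the chip.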
2121 */
2122 sc->sc_mchash_type = 0;
2123 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2124 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2125
2126 /* Set the receive filter. */
2127 wm_set_filter(sc);
2128
2129 /* Start the one second link check clock. */
2130 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2131
2132 /* ...all done! */
2133 ifp->if_flags |= IFF_RUNNING;
2134 ifp->if_flags &= ~IFF_OACTIVE;
2135
2136 out:
2137 if (error)
2138 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2139 return (error);
2140 }
2141
2142 /*
2143 * wm_rxdrain:
2144 *
2145 * Drain the receive queue.
2146 */
2147 void
2148 wm_rxdrain(struct wm_softc *sc)
2149 {
2150 struct wm_rxsoft *rxs;
2151 int i;
2152
2153 for (i = 0; i < WM_NRXDESC; i++) {
2154 rxs = &sc->sc_rxsoft[i];
2155 if (rxs->rxs_mbuf != NULL) {
2156 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2157 m_freem(rxs->rxs_mbuf);
2158 rxs->rxs_mbuf = NULL;
2159 }
2160 }
2161 }
2162
2163 /*
2164 * wm_stop: [ifnet interface function]
2165 *
2166 * Stop transmission on the interface.
2167 */
2168 void
2169 wm_stop(struct ifnet *ifp, int disable)
2170 {
2171 struct wm_softc *sc = ifp->if_softc;
2172 struct wm_txsoft *txs;
2173 int i;
2174
2175 /* Stop the one second clock. */
2176 callout_stop(&sc->sc_tick_ch);
2177
2178 if (sc->sc_flags & WM_F_HAS_MII) {
2179 /* Down the MII. */
2180 mii_down(&sc->sc_mii);
2181 }
2182
2183 /* Stop the transmit and receive processes. */
2184 CSR_WRITE(sc, WMREG_TCTL, 0);
2185 CSR_WRITE(sc, WMREG_RCTL, 0);
2186
2187 /* Release any queued transmit buffers. */
2188 for (i = 0; i < WM_TXQUEUELEN; i++) {
2189 txs = &sc->sc_txsoft[i];
2190 if (txs->txs_mbuf != NULL) {
2191 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2192 m_freem(txs->txs_mbuf);
2193 txs->txs_mbuf = NULL;
2194 }
2195 }
2196
2197 if (disable)
2198 wm_rxdrain(sc);
2199
2200 /* Mark the interface as down and cancel the watchdog timer. */
2201 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2202 ifp->if_timer = 0;
2203 }
2204
2205 /*
2206 * wm_read_eeprom:
2207 *
2208 * Read data from the serial EEPROM.
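 *
 *	The EEPROM is a Microwire device: for each word we assert
 *	chip select, shift out a 3-bit READ opcode and a 6- or 8-bit
 *	word address on the DI line, then clock the 16 data bits
 *	back in on DO, MSB first.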
2209 */
2210 void
2211 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2212 {
2213 uint32_t reg;
2214 int i, x, addrbits = 6;
2215
2216 for (i = 0; i < wordcnt; i++) {
2217 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2218 reg = CSR_READ(sc, WMREG_EECD);
2219
2220 /* Get number of address bits. */
2221 if (reg & EECD_EE_SIZE)
2222 addrbits = 8;
2223
2224 /* Request EEPROM access. */
2225 reg |= EECD_EE_REQ;
2226 CSR_WRITE(sc, WMREG_EECD, reg);
2227
2228 			/* ...and wait for it to be granted. */
2229 for (x = 0; x < 100; x++) {
2230 reg = CSR_READ(sc, WMREG_EECD);
2231 if (reg & EECD_EE_GNT)
2232 break;
2233 delay(5);
2234 }
2235 if ((reg & EECD_EE_GNT) == 0) {
2236 printf("%s: could not acquire EEPROM GNT\n",
2237 sc->sc_dev.dv_xname);
2238 				data[i] = 0xffff;
2239 reg &= ~EECD_EE_REQ;
2240 CSR_WRITE(sc, WMREG_EECD, reg);
2241 continue;
2242 }
2243 } else
2244 reg = 0;
2245
2246 /* Clear SK and DI. */
2247 reg &= ~(EECD_SK | EECD_DI);
2248 CSR_WRITE(sc, WMREG_EECD, reg);
2249
2250 /* Set CHIP SELECT. */
2251 reg |= EECD_CS;
2252 CSR_WRITE(sc, WMREG_EECD, reg);
2253 delay(2);
2254
2255 /* Shift in the READ command. */
2256 for (x = 3; x > 0; x--) {
2257 if (UWIRE_OPC_READ & (1 << (x - 1)))
2258 reg |= EECD_DI;
2259 else
2260 reg &= ~EECD_DI;
2261 CSR_WRITE(sc, WMREG_EECD, reg);
2262 delay(2);
2263 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2264 delay(2);
2265 CSR_WRITE(sc, WMREG_EECD, reg);
2266 delay(2);
2267 }
2268
2269 /* Shift in address. */
2270 for (x = addrbits; x > 0; x--) {
2271 if ((word + i) & (1 << (x - 1)))
2272 reg |= EECD_DI;
2273 else
2274 reg &= ~EECD_DI;
2275 CSR_WRITE(sc, WMREG_EECD, reg);
2276 delay(2);
2277 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2278 delay(2);
2279 CSR_WRITE(sc, WMREG_EECD, reg);
2280 delay(2);
2281 }
2282
2283 /* Shift out the data. */
2284 reg &= ~EECD_DI;
2285 data[i] = 0;
2286 for (x = 16; x > 0; x--) {
2287 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2288 delay(2);
2289 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2290 data[i] |= (1 << (x - 1));
2291 CSR_WRITE(sc, WMREG_EECD, reg);
2292 delay(2);
2293 }
2294
2295 /* Clear CHIP SELECT. */
2296 reg &= ~EECD_CS;
2297 CSR_WRITE(sc, WMREG_EECD, reg);
2298 delay(2);
2299
2300 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2301 /* Release the EEPROM. */
2302 reg &= ~EECD_EE_REQ;
2303 CSR_WRITE(sc, WMREG_EECD, reg);
2304 }
2305 }
2306 }
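
/*
 * A minimal sketch of how the attach path might consume the routine
 * above to recover the station address (the offset macro and the
 * "myea" local are illustrative; the real code is in wm_attach()):
 *
 *	uint16_t myea[ETHER_ADDR_LEN / 2];
 *	uint8_t enaddr[ETHER_ADDR_LEN];
 *	int j;
 *
 *	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
 *	    sizeof(myea) / sizeof(myea[0]), myea);
 *	for (j = 0; j < ETHER_ADDR_LEN / 2; j++) {
 *		enaddr[j * 2] = myea[j] & 0xff;
 *		enaddr[j * 2 + 1] = myea[j] >> 8;
 *	}
 */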
2307
2308 /*
2309 * wm_add_rxbuf:
2310 *
2311  *	Add a receive buffer to the indicated descriptor.
2312 */
2313 int
2314 wm_add_rxbuf(struct wm_softc *sc, int idx)
2315 {
2316 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2317 struct mbuf *m;
2318 int error;
2319
2320 MGETHDR(m, M_DONTWAIT, MT_DATA);
2321 if (m == NULL)
2322 return (ENOBUFS);
2323
2324 MCLGET(m, M_DONTWAIT);
2325 if ((m->m_flags & M_EXT) == 0) {
2326 m_freem(m);
2327 return (ENOBUFS);
2328 }
2329
2330 if (rxs->rxs_mbuf != NULL)
2331 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2332
2333 rxs->rxs_mbuf = m;
2334
2335 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2336 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2337 BUS_DMA_READ|BUS_DMA_NOWAIT);
2338 if (error) {
2339 printf("%s: unable to load rx DMA map %d, error = %d\n",
2340 sc->sc_dev.dv_xname, idx, error);
2341 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2342 }
2343
2344 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2345 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2346
2347 WM_INIT_RXDESC(sc, idx);
2348
2349 return (0);
2350 }
2351
2352 /*
2353 * wm_set_ral:
2354 *
2355  *	Set an entry in the receive address list.
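 *
 *	Each entry is a register pair: RAL_LO takes the first four
 *	octets of the address, RAL_HI the last two plus the
 *	address-valid bit (RAL_AV).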
2356 */
2357 static void
2358 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2359 {
2360 uint32_t ral_lo, ral_hi;
2361
2362 if (enaddr != NULL) {
2363 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2364 (enaddr[3] << 24);
2365 ral_hi = enaddr[4] | (enaddr[5] << 8);
2366 ral_hi |= RAL_AV;
2367 } else {
2368 ral_lo = 0;
2369 ral_hi = 0;
2370 }
2371
2372 if (sc->sc_type >= WM_T_82544) {
2373 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2374 ral_lo);
2375 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2376 ral_hi);
2377 } else {
2378 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2379 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2380 }
2381 }
2382
2383 /*
2384 * wm_mchash:
2385 *
2386 * Compute the hash of the multicast address for the 4096-bit
2387 * multicast filter.
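 *
 *	For example, with the default filter type 0 the address
 *	01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 8) = 0x100,
 *	which wm_set_filter() maps to bit 0 of MTA word 8.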
2388 */
2389 static uint32_t
2390 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2391 {
2392 static const int lo_shift[4] = { 4, 3, 2, 0 };
2393 static const int hi_shift[4] = { 4, 5, 6, 8 };
2394 uint32_t hash;
2395
2396 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2397 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2398
2399 return (hash & 0xfff);
2400 }
2401
2402 /*
2403 * wm_set_filter:
2404 *
2405 * Set up the receive filter.
2406 */
2407 void
2408 wm_set_filter(struct wm_softc *sc)
2409 {
2410 struct ethercom *ec = &sc->sc_ethercom;
2411 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2412 struct ether_multi *enm;
2413 struct ether_multistep step;
2414 bus_addr_t mta_reg;
2415 uint32_t hash, reg, bit;
2416 int i;
2417
2418 if (sc->sc_type >= WM_T_82544)
2419 mta_reg = WMREG_CORDOVA_MTA;
2420 else
2421 mta_reg = WMREG_MTA;
2422
2423 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2424
2425 if (ifp->if_flags & IFF_BROADCAST)
2426 sc->sc_rctl |= RCTL_BAM;
2427 if (ifp->if_flags & IFF_PROMISC) {
2428 sc->sc_rctl |= RCTL_UPE;
2429 goto allmulti;
2430 }
2431
2432 /*
2433 * Set the station address in the first RAL slot, and
2434 * clear the remaining slots.
2435 */
2436 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2437 for (i = 1; i < WM_RAL_TABSIZE; i++)
2438 wm_set_ral(sc, NULL, i);
2439
2440 /* Clear out the multicast table. */
2441 for (i = 0; i < WM_MC_TABSIZE; i++)
2442 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2443
2444 ETHER_FIRST_MULTI(step, ec, enm);
2445 while (enm != NULL) {
2446 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2447 /*
2448 * We must listen to a range of multicast addresses.
2449 * For now, just accept all multicasts, rather than
2450 * trying to set only those filter bits needed to match
2451 * the range. (At this time, the only use of address
2452 * ranges is for IP multicast routing, for which the
2453 * range is big enough to require all bits set.)
2454 */
2455 goto allmulti;
2456 }
2457
2458 hash = wm_mchash(sc, enm->enm_addrlo);
2459
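 		/*
 		 * The 12-bit hash selects one of 4096 filter bits:
 		 * bits 11:5 pick a 32-bit MTA word, bits 4:0 the bit
 		 * within that word.
 		 */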
2460 reg = (hash >> 5) & 0x7f;
2461 bit = hash & 0x1f;
2462
2463 hash = CSR_READ(sc, mta_reg + (reg << 2));
2464 hash |= 1U << bit;
2465
2466 		/*
 		 * XXX Hardware bug?  On the i82544, writing an
 		 * XXX odd-numbered MTA register can clobber the even
 		 * XXX register just below it, so save that register
 		 * XXX and rewrite it afterwards.
 		 */
2467 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
2468 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2469 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2470 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2471 } else
2472 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2473
2474 ETHER_NEXT_MULTI(step, enm);
2475 }
2476
2477 ifp->if_flags &= ~IFF_ALLMULTI;
2478 goto setit;
2479
2480 allmulti:
2481 ifp->if_flags |= IFF_ALLMULTI;
2482 sc->sc_rctl |= RCTL_MPE;
2483
2484 setit:
2485 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2486 }
2487
2488 /*
2489 * wm_tbi_mediainit:
2490 *
2491 * Initialize media for use on 1000BASE-X devices.
2492 */
2493 void
2494 wm_tbi_mediainit(struct wm_softc *sc)
2495 {
2496 const char *sep = "";
2497
2498 if (sc->sc_type < WM_T_82543)
2499 sc->sc_tipg = TIPG_WM_DFLT;
2500 else
2501 sc->sc_tipg = TIPG_LG_DFLT;
2502
2503 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2504 wm_tbi_mediastatus);
2505
2506 /*
2507 * SWD Pins:
2508 *
2509 * 0 = Link LED (output)
2510 * 1 = Loss Of Signal (input)
2511 */
2512 sc->sc_ctrl |= CTRL_SWDPIO(0);
2513 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2514
2515 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2516
2517 #define ADD(ss, mm, dd) \
2518 do { \
2519 printf("%s%s", sep, ss); \
2520 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2521 sep = ", "; \
2522 } while (/*CONSTCOND*/0)
2523
2524 printf("%s: ", sc->sc_dev.dv_xname);
2525 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2526 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2527 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2528 printf("\n");
2529
2530 #undef ADD
2531
2532 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2533 }
2534
2535 /*
2536 * wm_tbi_mediastatus: [ifmedia interface function]
2537 *
2538 * Get the current interface media status on a 1000BASE-X device.
2539 */
2540 void
2541 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2542 {
2543 struct wm_softc *sc = ifp->if_softc;
2544
2545 ifmr->ifm_status = IFM_AVALID;
2546 ifmr->ifm_active = IFM_ETHER;
2547
2548 if (sc->sc_tbi_linkup == 0) {
2549 ifmr->ifm_active |= IFM_NONE;
2550 return;
2551 }
2552
2553 ifmr->ifm_status |= IFM_ACTIVE;
2554 ifmr->ifm_active |= IFM_1000_SX;
2555 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2556 ifmr->ifm_active |= IFM_FDX;
2557 }
2558
2559 /*
2560 * wm_tbi_mediachange: [ifmedia interface function]
2561 *
2562 * Set hardware to newly-selected media on a 1000BASE-X device.
2563 */
2564 int
2565 wm_tbi_mediachange(struct ifnet *ifp)
2566 {
2567 struct wm_softc *sc = ifp->if_softc;
2568 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2569 uint32_t status;
2570 int i;
2571
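 	/*
 	 * TXCW is the 802.3z transmit configuration word: the
 	 * abilities we advertise to the link partner, plus the
 	 * autonegotiation-enable bit.
 	 */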
2572 sc->sc_txcw = ife->ifm_data;
2573 if (sc->sc_ctrl & CTRL_RFCE)
2574 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2575 if (sc->sc_ctrl & CTRL_TFCE)
2576 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2577 sc->sc_txcw |= TXCW_ANE;
2578
2579 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2580 delay(10000);
2581
2582 sc->sc_tbi_anstate = 0;
2583
2584 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2585 /* Have signal; wait for the link to come up. */
2586 for (i = 0; i < 50; i++) {
2587 delay(10000);
2588 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2589 break;
2590 }
2591
2592 status = CSR_READ(sc, WMREG_STATUS);
2593 if (status & STATUS_LU) {
2594 /* Link is up. */
2595 DPRINTF(WM_DEBUG_LINK,
2596 ("%s: LINK: set media -> link up %s\n",
2597 sc->sc_dev.dv_xname,
2598 (status & STATUS_FD) ? "FDX" : "HDX"));
2599 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2600 if (status & STATUS_FD)
2601 sc->sc_tctl |=
2602 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2603 else
2604 sc->sc_tctl |=
2605 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2606 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2607 sc->sc_tbi_linkup = 1;
2608 } else {
2609 /* Link is down. */
2610 DPRINTF(WM_DEBUG_LINK,
2611 ("%s: LINK: set media -> link down\n",
2612 sc->sc_dev.dv_xname));
2613 sc->sc_tbi_linkup = 0;
2614 }
2615 } else {
2616 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2617 sc->sc_dev.dv_xname));
2618 sc->sc_tbi_linkup = 0;
2619 }
2620
2621 wm_tbi_set_linkled(sc);
2622
2623 return (0);
2624 }
2625
2626 /*
2627 * wm_tbi_set_linkled:
2628 *
2629 * Update the link LED on 1000BASE-X devices.
2630 */
2631 void
2632 wm_tbi_set_linkled(struct wm_softc *sc)
2633 {
2634
2635 if (sc->sc_tbi_linkup)
2636 sc->sc_ctrl |= CTRL_SWDPIN(0);
2637 else
2638 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2639
2640 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2641 }
2642
2643 /*
2644 * wm_tbi_check_link:
2645 *
2646 * Check the link on 1000BASE-X devices.
2647 */
2648 void
2649 wm_tbi_check_link(struct wm_softc *sc)
2650 {
2651 uint32_t rxcw, ctrl, status;
2652
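 	/*
 	 * sc_tbi_anstate counts down the link-check ticks that
 	 * follow an autonegotiation event; zero means no check is
 	 * pending.
 	 */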
2653 if (sc->sc_tbi_anstate == 0)
2654 return;
2655 else if (sc->sc_tbi_anstate > 1) {
2656 DPRINTF(WM_DEBUG_LINK,
2657 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2658 sc->sc_tbi_anstate));
2659 sc->sc_tbi_anstate--;
2660 return;
2661 }
2662
2663 sc->sc_tbi_anstate = 0;
2664
2665 rxcw = CSR_READ(sc, WMREG_RXCW);
2666 ctrl = CSR_READ(sc, WMREG_CTRL);
2667 status = CSR_READ(sc, WMREG_STATUS);
2668
2669 if ((status & STATUS_LU) == 0) {
2670 DPRINTF(WM_DEBUG_LINK,
2671 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2672 sc->sc_tbi_linkup = 0;
2673 } else {
2674 DPRINTF(WM_DEBUG_LINK,
2675 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2676 (status & STATUS_FD) ? "FDX" : "HDX"));
2677 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2678 if (status & STATUS_FD)
2679 sc->sc_tctl |=
2680 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2681 else
2682 sc->sc_tctl |=
2683 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2684 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2685 sc->sc_tbi_linkup = 1;
2686 }
2687
2688 wm_tbi_set_linkled(sc);
2689 }
2690
2691 /*
2692 * wm_gmii_reset:
2693 *
2694 * Reset the PHY.
2695 */
2696 void
2697 wm_gmii_reset(struct wm_softc *sc)
2698 {
2699 uint32_t reg;
2700
2701 if (sc->sc_type >= WM_T_82544) {
2702 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2703 delay(20000);
2704
2705 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2706 delay(20000);
2707 } else {
2708 /* The PHY reset pin is active-low. */
2709 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2710 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2711 CTRL_EXT_SWDPIN(4));
2712 reg |= CTRL_EXT_SWDPIO(4);
2713
2714 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2715 delay(10);
2716
2717 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2718 delay(10);
2719
2720 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2721 delay(10);
2722 #if 0
2723 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2724 #endif
2725 }
2726 }
2727
2728 /*
2729 * wm_gmii_mediainit:
2730 *
2731 * Initialize media for use on 1000BASE-T devices.
2732 */
2733 void
2734 wm_gmii_mediainit(struct wm_softc *sc)
2735 {
2736 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2737
2738 /* We have MII. */
2739 sc->sc_flags |= WM_F_HAS_MII;
2740
2741 sc->sc_tipg = TIPG_1000T_DFLT;
2742
2743 /*
2744 * Let the chip set speed/duplex on its own based on
2745 * signals from the PHY.
2746 */
2747 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2748 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2749
2750 /* Initialize our media structures and probe the GMII. */
2751 sc->sc_mii.mii_ifp = ifp;
2752
2753 if (sc->sc_type >= WM_T_82544) {
2754 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2755 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2756 } else {
2757 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2758 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2759 }
2760 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2761
2762 wm_gmii_reset(sc);
2763
2764 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2765 wm_gmii_mediastatus);
2766
2767 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2768 MII_OFFSET_ANY, 0);
2769 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2770 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2771 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2772 } else
2773 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2774 }
2775
2776 /*
2777 * wm_gmii_mediastatus: [ifmedia interface function]
2778 *
2779 * Get the current interface media status on a 1000BASE-T device.
2780 */
2781 void
2782 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2783 {
2784 struct wm_softc *sc = ifp->if_softc;
2785
2786 mii_pollstat(&sc->sc_mii);
2787 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2788 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2789 }
2790
2791 /*
2792 * wm_gmii_mediachange: [ifmedia interface function]
2793 *
2794 * Set hardware to newly-selected media on a 1000BASE-T device.
2795 */
2796 int
2797 wm_gmii_mediachange(struct ifnet *ifp)
2798 {
2799 struct wm_softc *sc = ifp->if_softc;
2800
2801 if (ifp->if_flags & IFF_UP)
2802 mii_mediachg(&sc->sc_mii);
2803 return (0);
2804 }
2805
2806 #define MDI_IO CTRL_SWDPIN(2)
2807 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2808 #define MDI_CLK CTRL_SWDPIN(3)
2809
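/*
 * The i82543 has no MDIC register; MII management frames are
 * bit-banged through software-definable pins: MDI_IO carries the
 * data, MDI_CLK the clock, and MDI_DIR sets the data direction.
 */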
2810 static void
2811 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2812 {
2813 uint32_t i, v;
2814
2815 v = CSR_READ(sc, WMREG_CTRL);
2816 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2817 v |= MDI_DIR | CTRL_SWDPIO(3);
2818
2819 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2820 if (data & i)
2821 v |= MDI_IO;
2822 else
2823 v &= ~MDI_IO;
2824 CSR_WRITE(sc, WMREG_CTRL, v);
2825 delay(10);
2826 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2827 delay(10);
2828 CSR_WRITE(sc, WMREG_CTRL, v);
2829 delay(10);
2830 }
2831 }
2832
2833 static uint32_t
2834 i82543_mii_recvbits(struct wm_softc *sc)
2835 {
2836 uint32_t v, i, data = 0;
2837
2838 v = CSR_READ(sc, WMREG_CTRL);
2839 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2840 v |= CTRL_SWDPIO(3);
2841
2842 CSR_WRITE(sc, WMREG_CTRL, v);
2843 delay(10);
2844 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2845 delay(10);
2846 CSR_WRITE(sc, WMREG_CTRL, v);
2847 delay(10);
2848
2849 for (i = 0; i < 16; i++) {
2850 data <<= 1;
2851 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2852 delay(10);
2853 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2854 data |= 1;
2855 CSR_WRITE(sc, WMREG_CTRL, v);
2856 delay(10);
2857 }
2858
2859 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2860 delay(10);
2861 CSR_WRITE(sc, WMREG_CTRL, v);
2862 delay(10);
2863
2864 return (data);
2865 }
2866
2867 #undef MDI_IO
2868 #undef MDI_DIR
2869 #undef MDI_CLK
2870
2871 /*
2872 * wm_gmii_i82543_readreg: [mii interface function]
2873 *
2874 * Read a PHY register on the GMII (i82543 version).
2875 */
2876 int
2877 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2878 {
2879 struct wm_softc *sc = (void *) self;
2880 int rv;
2881
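 	/*
 	 * 32-bit preamble of ones, then the 14-bit read request:
 	 * start, opcode, PHY and register; the PHY supplies the
 	 * turnaround and the 16 data bits.
 	 */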
2882 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2883 i82543_mii_sendbits(sc, reg | (phy << 5) |
2884 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2885 rv = i82543_mii_recvbits(sc) & 0xffff;
2886
2887 DPRINTF(WM_DEBUG_GMII,
2888 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2889 sc->sc_dev.dv_xname, phy, reg, rv));
2890
2891 return (rv);
2892 }
2893
2894 /*
2895 * wm_gmii_i82543_writereg: [mii interface function]
2896 *
2897 * Write a PHY register on the GMII (i82543 version).
2898 */
2899 void
2900 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2901 {
2902 struct wm_softc *sc = (void *) self;
2903
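 	/*
 	 * 32-bit preamble of ones, then the full 32-bit write frame:
 	 * start, opcode, PHY, register, turnaround and data.
 	 */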
2904 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2905 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2906 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2907 (MII_COMMAND_START << 30), 32);
2908 }
2909
2910 /*
2911 * wm_gmii_i82544_readreg: [mii interface function]
2912 *
2913 * Read a PHY register on the GMII.
2914 */
2915 int
2916 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2917 {
2918 struct wm_softc *sc = (void *) self;
2919 uint32_t mdic;
2920 int i, rv;
2921
2922 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2923 MDIC_REGADD(reg));
2924
2925 for (i = 0; i < 100; i++) {
2926 mdic = CSR_READ(sc, WMREG_MDIC);
2927 if (mdic & MDIC_READY)
2928 break;
2929 delay(10);
2930 }
2931
2932 if ((mdic & MDIC_READY) == 0) {
2933 printf("%s: MDIC read timed out: phy %d reg %d\n",
2934 sc->sc_dev.dv_xname, phy, reg);
2935 rv = 0;
2936 } else if (mdic & MDIC_E) {
2937 #if 0 /* This is normal if no PHY is present. */
2938 printf("%s: MDIC read error: phy %d reg %d\n",
2939 sc->sc_dev.dv_xname, phy, reg);
2940 #endif
2941 rv = 0;
2942 } else {
2943 rv = MDIC_DATA(mdic);
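 		/*
 		 * An all-ones read usually means no PHY responded.
 		 */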
2944 if (rv == 0xffff)
2945 rv = 0;
2946 }
2947
2948 return (rv);
2949 }
2950
2951 /*
2952 * wm_gmii_i82544_writereg: [mii interface function]
2953 *
2954 * Write a PHY register on the GMII.
2955 */
2956 void
2957 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2958 {
2959 struct wm_softc *sc = (void *) self;
2960 uint32_t mdic;
2961 int i;
2962
2963 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2964 MDIC_REGADD(reg) | MDIC_DATA(val));
2965
2966 for (i = 0; i < 100; i++) {
2967 mdic = CSR_READ(sc, WMREG_MDIC);
2968 if (mdic & MDIC_READY)
2969 break;
2970 delay(10);
2971 }
2972
2973 if ((mdic & MDIC_READY) == 0)
2974 printf("%s: MDIC write timed out: phy %d reg %d\n",
2975 sc->sc_dev.dv_xname, phy, reg);
2976 else if (mdic & MDIC_E)
2977 printf("%s: MDIC write error: phy %d reg %d\n",
2978 sc->sc_dev.dv_xname, phy, reg);
2979 }
2980
2981 /*
2982 * wm_gmii_statchg: [mii interface function]
2983 *
2984 * Callback from MII layer when media changes.
2985 */
2986 void
2987 wm_gmii_statchg(struct device *self)
2988 {
2989 struct wm_softc *sc = (void *) self;
2990
2991 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2992
2993 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2994 DPRINTF(WM_DEBUG_LINK,
2995 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2996 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2997 } else {
2998 DPRINTF(WM_DEBUG_LINK,
2999 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3000 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3001 }
3002
3003 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3004 }
3005