/*	$NetBSD: if_wm.c,v 1.34 2003/04/05 13:23:17 kent Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
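/*
 * Note that both the descriptor ring and the job queue sizes are
 * powers of two, so the "next index" macros above can wrap with a
 * simple mask rather than a modulus; e.g. WM_NEXTTX(255) ==
 * ((255 + 1) & WM_NTXDESC_MASK) == 0, back to the start of the ring.
 */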

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
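/*
 * The offset macros above locate an individual descriptor within the
 * control-data clump.  Adding such an offset to the clump's single DMA
 * segment address (see WM_CDTXADDR/WM_CDRXADDR below) yields the bus
 * address handed to the chip, and the same offsets let us
 * bus_dmamap_sync() just the descriptors we touched rather than the
 * whole clump.
 */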

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */
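
	/*
	 * The two words above cache the most recently loaded checksum
	 * context descriptor; wm_tx_cksum() compares against them so
	 * that an identical context is not re-loaded for every packet.
	 * A value of 0xffffffff means "no context loaded yet" (see
	 * wm_init()).
	 */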

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
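
/*
 * The Rx chain macros above keep sc_rxtailp pointing at the m_next
 * field of the last mbuf in the chain (or at sc_rxhead when the chain
 * is empty), so appending a buffer to a multi-descriptor packet is a
 * constant-time pointer store rather than a walk of the chain.
 */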

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
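
/*
 * Example of the wrap handling above: syncing 4 descriptors starting
 * at index 254 first syncs descriptors 254-255 (the tail of the ring)
 * and then descriptors 0-1, as two separate bus_dmamap_sync() calls.
 */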

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't accept packets longer than the	\
	 * standard Ethernet MTU, without incurring a big penalty to	\
	 * copy every incoming packet to a new, suitably aligned	\
	 * buffer.							\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
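
/*
 * Note that WM_INIT_RXDESC finishes by writing the descriptor's index
 * to the Rx Descriptor Tail register; advancing the tail past the
 * (re-)initialized descriptor is how it is handed back to the chip.
 */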

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
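
	/*
	 * For example, for an un-encapsulated IPv4 packet with no IP
	 * options, offset is ETHER_HDR_LEN (14), so IPCSS = 14,
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, and
	 * IPCSE = 14 + 20 - 1 = 33, the last byte of the IP header.
	 */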

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
1479 ("%s: TX: got TDXW interrupt\n",
1480 sc->sc_dev.dv_xname));
1481 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1482 }
1483 #endif
1484 wm_txintr(sc);
1485
1486 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1487 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1488 wm_linkintr(sc, icr);
1489 }
1490
1491 if (icr & ICR_RXO) {
1492 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1493 wantinit = 1;
1494 }
1495 }
1496
1497 if (handled) {
1498 if (wantinit)
1499 wm_init(ifp);
1500
1501 /* Try to get more packets going. */
1502 wm_start(ifp);
1503 }
1504
1505 return (handled);
1506 }
1507
1508 /*
1509 * wm_txintr:
1510 *
1511 * Helper; handle transmit interrupts.
1512 */
1513 void
1514 wm_txintr(struct wm_softc *sc)
1515 {
1516 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1517 struct wm_txsoft *txs;
1518 uint8_t status;
1519 int i;
1520
1521 ifp->if_flags &= ~IFF_OACTIVE;
1522
1523 /*
1524 * Go through the Tx list and free mbufs for those
1525 * frames which have been transmitted.
1526 */
1527 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1528 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1529 txs = &sc->sc_txsoft[i];
1530
1531 DPRINTF(WM_DEBUG_TX,
1532 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1533
1534 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1535 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1536
1537 status = le32toh(sc->sc_txdescs[
1538 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1539 if ((status & WTX_ST_DD) == 0) {
1540 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1541 BUS_DMASYNC_PREREAD);
1542 break;
1543 }
1544
1545 DPRINTF(WM_DEBUG_TX,
1546 ("%s: TX: job %d done: descs %d..%d\n",
1547 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1548 txs->txs_lastdesc));
1549
1550 /*
1551 * XXX We should probably be using the statistics
1552 * XXX registers, but I don't know if they exist
1553 * XXX on chips before the i82544.
1554 */
1555
1556 #ifdef WM_EVENT_COUNTERS
1557 if (status & WTX_ST_TU)
1558 WM_EVCNT_INCR(&sc->sc_ev_tu);
1559 #endif /* WM_EVENT_COUNTERS */
1560
1561 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1562 ifp->if_oerrors++;
1563 if (status & WTX_ST_LC)
1564 printf("%s: late collision\n",
1565 sc->sc_dev.dv_xname);
1566 else if (status & WTX_ST_EC) {
1567 ifp->if_collisions += 16;
1568 printf("%s: excessive collisions\n",
1569 sc->sc_dev.dv_xname);
1570 }
1571 } else
1572 ifp->if_opackets++;
1573
1574 sc->sc_txfree += txs->txs_ndesc;
1575 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1576 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1577 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1578 m_freem(txs->txs_mbuf);
1579 txs->txs_mbuf = NULL;
1580 }
1581
1582 /* Update the dirty transmit buffer pointer. */
1583 sc->sc_txsdirty = i;
1584 DPRINTF(WM_DEBUG_TX,
1585 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1586
1587 /*
1588 * If there are no more pending transmissions, cancel the watchdog
1589 * timer.
1590 */
1591 if (sc->sc_txsfree == WM_TXQUEUELEN)
1592 ifp->if_timer = 0;
1593 }
1594
1595 /*
1596 * wm_rxintr:
1597 *
1598 * Helper; handle receive interrupts.
1599 */
1600 void
1601 wm_rxintr(struct wm_softc *sc)
1602 {
1603 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1604 struct wm_rxsoft *rxs;
1605 struct mbuf *m;
1606 int i, len;
1607 uint8_t status, errors;
1608
1609 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1610 rxs = &sc->sc_rxsoft[i];
1611
1612 DPRINTF(WM_DEBUG_RX,
1613 ("%s: RX: checking descriptor %d\n",
1614 sc->sc_dev.dv_xname, i));
1615
1616 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1617
1618 status = sc->sc_rxdescs[i].wrx_status;
1619 errors = sc->sc_rxdescs[i].wrx_errors;
1620 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1621
1622 if ((status & WRX_ST_DD) == 0) {
1623 /*
1624 * We have processed all of the receive descriptors.
1625 */
1626 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1627 break;
1628 }
1629
1630 if (__predict_false(sc->sc_rxdiscard)) {
1631 DPRINTF(WM_DEBUG_RX,
1632 ("%s: RX: discarding contents of descriptor %d\n",
1633 sc->sc_dev.dv_xname, i));
1634 WM_INIT_RXDESC(sc, i);
1635 if (status & WRX_ST_EOP) {
1636 /* Reset our state. */
1637 DPRINTF(WM_DEBUG_RX,
1638 ("%s: RX: resetting rxdiscard -> 0\n",
1639 sc->sc_dev.dv_xname));
1640 sc->sc_rxdiscard = 0;
1641 }
1642 continue;
1643 }
1644
1645 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1646 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1647
1648 m = rxs->rxs_mbuf;
1649
1650 /*
1651 * Add a new receive buffer to the ring.
1652 */
1653 if (wm_add_rxbuf(sc, i) != 0) {
1654 /*
1655 * Failed, throw away what we've done so
1656 * far, and discard the rest of the packet.
1657 */
1658 ifp->if_ierrors++;
1659 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1660 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1661 WM_INIT_RXDESC(sc, i);
1662 if ((status & WRX_ST_EOP) == 0)
1663 sc->sc_rxdiscard = 1;
1664 if (sc->sc_rxhead != NULL)
1665 m_freem(sc->sc_rxhead);
1666 WM_RXCHAIN_RESET(sc);
1667 DPRINTF(WM_DEBUG_RX,
1668 ("%s: RX: Rx buffer allocation failed, "
1669 "dropping packet%s\n", sc->sc_dev.dv_xname,
1670 sc->sc_rxdiscard ? " (discard)" : ""));
1671 continue;
1672 }
1673
1674 WM_RXCHAIN_LINK(sc, m);
1675
1676 m->m_len = len;
1677
1678 DPRINTF(WM_DEBUG_RX,
1679 ("%s: RX: buffer at %p len %d\n",
1680 sc->sc_dev.dv_xname, m->m_data, len));
1681
1682 /*
1683 * If this is not the end of the packet, keep
1684 * looking.
1685 */
1686 if ((status & WRX_ST_EOP) == 0) {
1687 sc->sc_rxlen += len;
1688 DPRINTF(WM_DEBUG_RX,
1689 ("%s: RX: not yet EOP, rxlen -> %d\n",
1690 sc->sc_dev.dv_xname, sc->sc_rxlen));
1691 continue;
1692 }
1693
1694 /*
1695 * Okay, we have the entire packet now...
1696 */
1697 *sc->sc_rxtailp = NULL;
1698 m = sc->sc_rxhead;
1699 len += sc->sc_rxlen;
1700
1701 WM_RXCHAIN_RESET(sc);
1702
1703 DPRINTF(WM_DEBUG_RX,
1704 ("%s: RX: have entire packet, len -> %d\n",
1705 sc->sc_dev.dv_xname, len));
1706
1707 /*
1708 * If an error occurred, update stats and drop the packet.
1709 */
1710 if (errors &
1711 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1712 ifp->if_ierrors++;
1713 if (errors & WRX_ER_SE)
1714 printf("%s: symbol error\n",
1715 sc->sc_dev.dv_xname);
1716 else if (errors & WRX_ER_SEQ)
1717 printf("%s: receive sequence error\n",
1718 sc->sc_dev.dv_xname);
1719 else if (errors & WRX_ER_CE)
1720 printf("%s: CRC error\n",
1721 sc->sc_dev.dv_xname);
1722 m_freem(m);
1723 continue;
1724 }
1725
1726 /*
1727 * No errors. Receive the packet.
1728 *
1729 * Note, we have configured the chip to include the
1730 * CRC with every packet.
1731 */
1732 m->m_flags |= M_HASFCS;
1733 m->m_pkthdr.rcvif = ifp;
1734 m->m_pkthdr.len = len;
1735
1736 #if 0 /* XXXJRT */
1737 /*
1738 * If VLANs are enabled, VLAN packets have been unwrapped
1739 * for us. Associate the tag with the packet.
1740 */
1741 if (sc->sc_ethercom.ec_nvlans != 0 &&
1742 (status & WRX_ST_VP) != 0) {
1743 struct m_tag *vtag;
1744
1745 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1746 M_NOWAIT);
1747 if (vtag == NULL) {
1748 ifp->if_ierrors++;
1749 printf("%s: unable to allocate VLAN tag\n",
1750 sc->sc_dev.dv_xname);
1751 m_freem(m);
1752 continue;
1753 }
1754
1755 *(u_int *)(vtag + 1) =
1756 le16toh(sc->sc_rxdescs[i].wrx_special);
1757 }
1758 #endif /* XXXJRT */
1759
1760 /*
1761 * Set up checksum info for this packet.
1762 */
1763 if (status & WRX_ST_IPCS) {
1764 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1765 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1766 if (errors & WRX_ER_IPE)
1767 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1768 }
1769 if (status & WRX_ST_TCPCS) {
1770 /*
1771 * Note: we don't know if this was TCP or UDP,
1772 * so we just set both bits, and expect the
1773 * upper layers to deal.
1774 */
1775 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1776 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1777 if (errors & WRX_ER_TCPE)
1778 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1779 }
1780
1781 ifp->if_ipackets++;
1782
1783 #if NBPFILTER > 0
1784 /* Pass this up to any BPF listeners. */
1785 if (ifp->if_bpf)
1786 bpf_mtap(ifp->if_bpf, m);
1787 #endif /* NBPFILTER > 0 */
1788
1789 /* Pass it on. */
1790 (*ifp->if_input)(ifp, m);
1791 }
1792
1793 /* Update the receive pointer. */
1794 sc->sc_rxptr = i;
1795
1796 DPRINTF(WM_DEBUG_RX,
1797 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1798 }
1799
1800 /*
1801 * wm_linkintr:
1802 *
1803 * Helper; handle link interrupts.
1804 */
1805 void
1806 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1807 {
1808 uint32_t status;
1809
1810 /*
1811 * If we get a link status interrupt on a 1000BASE-T
1812 * device, just fall into the normal MII tick path.
1813 */
1814 if (sc->sc_flags & WM_F_HAS_MII) {
1815 if (icr & ICR_LSC) {
1816 DPRINTF(WM_DEBUG_LINK,
1817 ("%s: LINK: LSC -> mii_tick\n",
1818 sc->sc_dev.dv_xname));
1819 mii_tick(&sc->sc_mii);
1820 } else if (icr & ICR_RXSEQ) {
1821 DPRINTF(WM_DEBUG_LINK,
1822 ("%s: LINK Receive sequence error\n",
1823 sc->sc_dev.dv_xname));
1824 }
1825 return;
1826 }
1827
1828 /*
1829 * If we are now receiving /C/, check for link again in
1830 * a couple of link clock ticks.
1831 */
1832 if (icr & ICR_RXCFG) {
1833 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1834 sc->sc_dev.dv_xname));
1835 sc->sc_tbi_anstate = 2;
1836 }
1837
1838 if (icr & ICR_LSC) {
1839 status = CSR_READ(sc, WMREG_STATUS);
1840 if (status & STATUS_LU) {
1841 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1842 sc->sc_dev.dv_xname,
1843 (status & STATUS_FD) ? "FDX" : "HDX"));
1844 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1845 if (status & STATUS_FD)
1846 sc->sc_tctl |=
1847 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1848 else
1849 sc->sc_tctl |=
1850 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1851 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1852 sc->sc_tbi_linkup = 1;
1853 } else {
1854 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1855 sc->sc_dev.dv_xname));
1856 sc->sc_tbi_linkup = 0;
1857 }
1858 sc->sc_tbi_anstate = 2;
1859 wm_tbi_set_linkled(sc);
1860 } else if (icr & ICR_RXSEQ) {
1861 DPRINTF(WM_DEBUG_LINK,
1862 ("%s: LINK: Receive sequence error\n",
1863 sc->sc_dev.dv_xname));
1864 }
1865 }
1866
1867 /*
1868 * wm_tick:
1869 *
1870 * One second timer, used to check link status, sweep up
1871 * completed transmit jobs, etc.
1872 */
1873 void
1874 wm_tick(void *arg)
1875 {
1876 struct wm_softc *sc = arg;
1877 int s;
1878
1879 s = splnet();
1880
1881 if (sc->sc_flags & WM_F_HAS_MII)
1882 mii_tick(&sc->sc_mii);
1883 else
1884 wm_tbi_check_link(sc);
1885
1886 splx(s);
1887
1888 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1889 }
1890
1891 /*
1892 * wm_reset:
1893 *
1894 * Reset the i82542 chip.
1895 */
1896 void
1897 wm_reset(struct wm_softc *sc)
1898 {
1899 int i;
1900
1901 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1902 delay(10000);
1903
1904 for (i = 0; i < 1000; i++) {
1905 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1906 return;
1907 delay(20);
1908 }
1909
1910 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1911 printf("%s: WARNING: reset failed to complete\n",
1912 sc->sc_dev.dv_xname);
1913 }
1914
1915 /*
1916 * wm_init: [ifnet interface function]
1917 *
1918 * Initialize the interface. Must be called at splnet().
1919 */
1920 int
1921 wm_init(struct ifnet *ifp)
1922 {
1923 struct wm_softc *sc = ifp->if_softc;
1924 struct wm_rxsoft *rxs;
1925 int i, error = 0;
1926 uint32_t reg;
1927
1928 /* Cancel any pending I/O. */
1929 wm_stop(ifp, 0);
1930
1931 /* Reset the chip to a known state. */
1932 wm_reset(sc);
1933
1934 /* Initialize the transmit descriptor ring. */
1935 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1936 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1937 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1938 sc->sc_txfree = WM_NTXDESC;
1939 sc->sc_txnext = 0;
1940
1941 sc->sc_txctx_ipcs = 0xffffffff;
1942 sc->sc_txctx_tucs = 0xffffffff;
1943
1944 if (sc->sc_type < WM_T_82543) {
1945 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1946 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1947 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1948 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1949 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1950 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1951 } else {
1952 CSR_WRITE(sc, WMREG_TBDAH, 0);
1953 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1954 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1955 CSR_WRITE(sc, WMREG_TDH, 0);
1956 CSR_WRITE(sc, WMREG_TDT, 0);
1957 CSR_WRITE(sc, WMREG_TIDV, 128);
1958
1959 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1960 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1961 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1962 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1963 }
1964 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1965 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1966
1967 /* Initialize the transmit job descriptors. */
1968 for (i = 0; i < WM_TXQUEUELEN; i++)
1969 sc->sc_txsoft[i].txs_mbuf = NULL;
1970 sc->sc_txsfree = WM_TXQUEUELEN;
1971 sc->sc_txsnext = 0;
1972 sc->sc_txsdirty = 0;
1973
1974 /*
1975 * Initialize the receive descriptor and receive job
1976 * descriptor rings.
1977 */
1978 if (sc->sc_type < WM_T_82543) {
1979 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1980 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1981 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1982 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1983 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1984 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1985
1986 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1987 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1988 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1989 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1990 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1991 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1992 } else {
1993 CSR_WRITE(sc, WMREG_RDBAH, 0);
1994 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1995 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1996 CSR_WRITE(sc, WMREG_RDH, 0);
1997 CSR_WRITE(sc, WMREG_RDT, 0);
1998 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1999 }
2000 for (i = 0; i < WM_NRXDESC; i++) {
2001 rxs = &sc->sc_rxsoft[i];
2002 if (rxs->rxs_mbuf == NULL) {
2003 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2004 printf("%s: unable to allocate or map rx "
2005 "buffer %d, error = %d\n",
2006 sc->sc_dev.dv_xname, i, error);
2007 /*
2008 * XXX Should attempt to run with fewer receive
2009 * XXX buffers instead of just failing.
2010 */
2011 wm_rxdrain(sc);
2012 goto out;
2013 }
2014 } else
2015 WM_INIT_RXDESC(sc, i);
2016 }
2017 sc->sc_rxptr = 0;
2018 sc->sc_rxdiscard = 0;
2019 WM_RXCHAIN_RESET(sc);
2020
2021 /*
2022 * Clear out the VLAN table -- we don't use it (yet).
2023 */
2024 CSR_WRITE(sc, WMREG_VET, 0);
2025 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2026 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2027
2028 /*
2029 * Set up flow-control parameters.
2030 *
2031 * XXX Values could probably stand some tuning.
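*
* FCAL/FCAH and FCT tell the MAC how to recognize inbound PAUSE
* frames; the constants presumably encode the IEEE 802.3x reserved
* multicast address 01:80:c2:00:00:01 and the MAC control ethertype
* 0x8808.  FCRTL/FCRTH are the receive FIFO low/high water marks,
* and FCTTV is the timer value placed in PAUSE frames we transmit.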
2032 */
2033 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2034 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2035 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2036 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2037
2038 if (sc->sc_type < WM_T_82543) {
2039 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2040 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2041 } else {
2042 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2043 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2044 }
2045 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2046 }
2047
2048 #if 0 /* XXXJRT */
2049 /* Deal with VLAN enables. */
2050 if (sc->sc_ethercom.ec_nvlans != 0)
2051 sc->sc_ctrl |= CTRL_VME;
2052 else
2053 #endif /* XXXJRT */
2054 sc->sc_ctrl &= ~CTRL_VME;
2055
2056 /* Write the control registers. */
2057 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2058 #if 0
2059 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2060 #endif
2061
2062 /*
2063 * Set up checksum offload parameters.
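*
* Note the interplay below: enabling TCP/UDP offload (TUOFL) also
* forces IPOFL on, and IPOFL is cleared again only when neither
* capability is enabled.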
2064 */
2065 reg = CSR_READ(sc, WMREG_RXCSUM);
2066 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2067 reg |= RXCSUM_IPOFL;
2068 else
2069 reg &= ~RXCSUM_IPOFL;
2070 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2071 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2072 else {
2073 reg &= ~RXCSUM_TUOFL;
2074 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2075 reg &= ~RXCSUM_IPOFL;
2076 }
2077 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2078
2079 /*
2080 * Set up the interrupt registers.
2081 */
2082 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2083 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2084 ICR_RXO | ICR_RXT0;
2085 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2086 sc->sc_icr |= ICR_RXCFG;
2087 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2088
2089 /* Set up the inter-packet gap. */
2090 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2091
2092 #if 0 /* XXXJRT */
2093 /* Set the VLAN ethernetype. */
2094 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2095 #endif
2096
2097 /*
2098 * Set up the transmit control register; we start out with
2099 * a collision distance suitable for FDX, but update it when
2100 * we resolve the media type.
2101 */
2102 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2103 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2104 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2105
2106 /* Set the media. */
2107 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2108
2109 /*
2110 * Set up the receive control register; we actually program
2111 * the register when we set the receive filter. Use multicast
2112 * address offset type 0.
2113 *
2114 * Only the i82544 has the ability to strip the incoming
2115 * CRC, so we don't enable that feature.
2116 */
2117 sc->sc_mchash_type = 0;
2118 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2119 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2120
2121 /* Set the receive filter. */
2122 wm_set_filter(sc);
2123
2124 /* Start the one second link check clock. */
2125 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2126
2127 /* ...all done! */
2128 ifp->if_flags |= IFF_RUNNING;
2129 ifp->if_flags &= ~IFF_OACTIVE;
2130
2131 out:
2132 if (error)
2133 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2134 return (error);
2135 }
2136
2137 /*
2138 * wm_rxdrain:
2139 *
2140 * Drain the receive queue.
2141 */
2142 void
2143 wm_rxdrain(struct wm_softc *sc)
2144 {
2145 struct wm_rxsoft *rxs;
2146 int i;
2147
2148 for (i = 0; i < WM_NRXDESC; i++) {
2149 rxs = &sc->sc_rxsoft[i];
2150 if (rxs->rxs_mbuf != NULL) {
2151 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2152 m_freem(rxs->rxs_mbuf);
2153 rxs->rxs_mbuf = NULL;
2154 }
2155 }
2156 }
2157
2158 /*
2159 * wm_stop: [ifnet interface function]
2160 *
2161 * Stop transmission on the interface.
2162 */
2163 void
2164 wm_stop(struct ifnet *ifp, int disable)
2165 {
2166 struct wm_softc *sc = ifp->if_softc;
2167 struct wm_txsoft *txs;
2168 int i;
2169
2170 /* Stop the one second clock. */
2171 callout_stop(&sc->sc_tick_ch);
2172
2173 if (sc->sc_flags & WM_F_HAS_MII) {
2174 /* Down the MII. */
2175 mii_down(&sc->sc_mii);
2176 }
2177
2178 /* Stop the transmit and receive processes. */
2179 CSR_WRITE(sc, WMREG_TCTL, 0);
2180 CSR_WRITE(sc, WMREG_RCTL, 0);
2181
2182 /* Release any queued transmit buffers. */
2183 for (i = 0; i < WM_TXQUEUELEN; i++) {
2184 txs = &sc->sc_txsoft[i];
2185 if (txs->txs_mbuf != NULL) {
2186 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2187 m_freem(txs->txs_mbuf);
2188 txs->txs_mbuf = NULL;
2189 }
2190 }
2191
2192 if (disable)
2193 wm_rxdrain(sc);
2194
2195 /* Mark the interface as down and cancel the watchdog timer. */
2196 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2197 ifp->if_timer = 0;
2198 }
2199
2200 /*
2201 * wm_read_eeprom:
2202 *
2203 * Read data from the serial EEPROM.
2204 */
2205 void
2206 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2207 {
2208 uint32_t reg;
2209 int i, x, addrbits = 6;
2210
2211 for (i = 0; i < wordcnt; i++) {
2212 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2213 reg = CSR_READ(sc, WMREG_EECD);
2214
2215 /* Get number of address bits. */
2216 if (reg & EECD_EE_SIZE)
2217 addrbits = 8;
2218
2219 /* Request EEPROM access. */
2220 reg |= EECD_EE_REQ;
2221 CSR_WRITE(sc, WMREG_EECD, reg);
2222
2223 /* ...and wait for it to be granted. */
2224 for (x = 0; x < 100; x++) {
2225 reg = CSR_READ(sc, WMREG_EECD);
2226 if (reg & EECD_EE_GNT)
2227 break;
2228 delay(5);
2229 }
2230 if ((reg & EECD_EE_GNT) == 0) {
2231 printf("%s: could not acquire EEPROM GNT\n",
2232 sc->sc_dev.dv_xname);
2233 data[i] = 0xffff;
2234 reg &= ~EECD_EE_REQ;
2235 CSR_WRITE(sc, WMREG_EECD, reg);
2236 continue;
2237 }
2238 } else
2239 reg = 0;
2240
2241 /* Clear SK and DI. */
2242 reg &= ~(EECD_SK | EECD_DI);
2243 CSR_WRITE(sc, WMREG_EECD, reg);
2244
2245 /* Set CHIP SELECT. */
2246 reg |= EECD_CS;
2247 CSR_WRITE(sc, WMREG_EECD, reg);
2248 delay(2);
2249
2250 /* Shift in the READ command. */
2251 for (x = 3; x > 0; x--) {
2252 if (UWIRE_OPC_READ & (1 << (x - 1)))
2253 reg |= EECD_DI;
2254 else
2255 reg &= ~EECD_DI;
2256 CSR_WRITE(sc, WMREG_EECD, reg);
2257 delay(2);
2258 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2259 delay(2);
2260 CSR_WRITE(sc, WMREG_EECD, reg);
2261 delay(2);
2262 }
2263
2264 /* Shift in address. */
2265 for (x = addrbits; x > 0; x--) {
2266 if ((word + i) & (1 << (x - 1)))
2267 reg |= EECD_DI;
2268 else
2269 reg &= ~EECD_DI;
2270 CSR_WRITE(sc, WMREG_EECD, reg);
2271 delay(2);
2272 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2273 delay(2);
2274 CSR_WRITE(sc, WMREG_EECD, reg);
2275 delay(2);
2276 }
2277
2278 /* Shift out the data. */
2279 reg &= ~EECD_DI;
2280 data[i] = 0;
2281 for (x = 16; x > 0; x--) {
2282 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2283 delay(2);
2284 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2285 data[i] |= (1 << (x - 1));
2286 CSR_WRITE(sc, WMREG_EECD, reg);
2287 delay(2);
2288 }
2289
2290 /* Clear CHIP SELECT. */
2291 reg &= ~EECD_CS;
2292 CSR_WRITE(sc, WMREG_EECD, reg);
2293 delay(2);
2294
2295 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2296 /* Release the EEPROM. */
2297 reg &= ~EECD_EE_REQ;
2298 CSR_WRITE(sc, WMREG_EECD, reg);
2299 }
2300 }
2301 }
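/*
 * The loop above bit-bangs one standard three-wire (Microwire) READ
 * transaction per word: raise CHIP SELECT, clock out the 3-bit READ
 * opcode (UWIRE_OPC_READ) MSB-first on DI, clock out the 6- or 8-bit
 * word address MSB-first, then clock in 16 data bits MSB-first on DO
 * and drop CHIP SELECT.  A typical caller fetches the station address
 * from the first EEPROM words at attach time, along the lines of
 * (illustrative sketch only):
 *
 *	uint16_t myea[3];
 *
 *	wm_read_eeprom(sc, 0, 3, myea);
 */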
2302
2303 /*
2304 * wm_add_rxbuf:
2305 *
2306 * Add a receive buffer to the indicated descriptor.
2307 */
2308 int
2309 wm_add_rxbuf(struct wm_softc *sc, int idx)
2310 {
2311 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2312 struct mbuf *m;
2313 int error;
2314
2315 MGETHDR(m, M_DONTWAIT, MT_DATA);
2316 if (m == NULL)
2317 return (ENOBUFS);
2318
2319 MCLGET(m, M_DONTWAIT);
2320 if ((m->m_flags & M_EXT) == 0) {
2321 m_freem(m);
2322 return (ENOBUFS);
2323 }
2324
2325 if (rxs->rxs_mbuf != NULL)
2326 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2327
2328 rxs->rxs_mbuf = m;
2329
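/*
 * Map the entire cluster; the real length of a received frame is
 * picked up from the rx descriptor later.
 */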
2330 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2331 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2332 BUS_DMA_READ|BUS_DMA_NOWAIT);
2333 if (error) {
2334 printf("%s: unable to load rx DMA map %d, error = %d\n",
2335 sc->sc_dev.dv_xname, idx, error);
2336 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2337 }
2338
2339 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2340 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2341
2342 WM_INIT_RXDESC(sc, idx);
2343
2344 return (0);
2345 }
2346
2347 /*
2348 * wm_set_ral:
2349 *
2350 * Set an entry in the receive address list.
2351 */
2352 static void
2353 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2354 {
2355 uint32_t ral_lo, ral_hi;
2356
2357 if (enaddr != NULL) {
2358 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2359 (enaddr[3] << 24);
2360 ral_hi = enaddr[4] | (enaddr[5] << 8);
2361 ral_hi |= RAL_AV;
2362 } else {
2363 ral_lo = 0;
2364 ral_hi = 0;
2365 }
2366
2367 if (sc->sc_type >= WM_T_82544) {
2368 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2369 ral_lo);
2370 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2371 ral_hi);
2372 } else {
2373 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2374 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2375 }
2376 }
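/*
 * Packing example (address chosen for illustration only): the
 * station address 00:a0:c9:12:34:56 yields
 *
 *	ral_lo = 0x12c9a000	(bytes 0-3, byte 0 in the low octet)
 *	ral_hi = 0x00005634 | RAL_AV
 *
 * i.e. the address is stored little-endian, with the Address Valid
 * bit set in the high word.
 */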
2377
2378 /*
2379 * wm_mchash:
2380 *
2381 * Compute the hash of the multicast address for the 4096-bit
2382 * multicast filter.
2383 */
2384 static uint32_t
2385 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2386 {
2387 static const int lo_shift[4] = { 4, 3, 2, 0 };
2388 static const int hi_shift[4] = { 4, 5, 6, 8 };
2389 uint32_t hash;
2390
2391 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2392 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2393
2394 return (hash & 0xfff);
2395 }
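/*
 * Worked example for the default filter type 0: the IPv4 all-hosts
 * group 01:00:5e:00:00:01 hashes to
 *
 *	(0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * which wm_set_filter() below maps to multicast table register
 * 0x010 >> 5 = 0, bit 0x010 & 0x1f = 16.
 */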
2396
2397 /*
2398 * wm_set_filter:
2399 *
2400 * Set up the receive filter.
2401 */
2402 void
2403 wm_set_filter(struct wm_softc *sc)
2404 {
2405 struct ethercom *ec = &sc->sc_ethercom;
2406 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2407 struct ether_multi *enm;
2408 struct ether_multistep step;
2409 bus_addr_t mta_reg;
2410 uint32_t hash, reg, bit;
2411 int i;
2412
2413 if (sc->sc_type >= WM_T_82544)
2414 mta_reg = WMREG_CORDOVA_MTA;
2415 else
2416 mta_reg = WMREG_MTA;
2417
2418 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2419
2420 if (ifp->if_flags & IFF_BROADCAST)
2421 sc->sc_rctl |= RCTL_BAM;
2422 if (ifp->if_flags & IFF_PROMISC) {
2423 sc->sc_rctl |= RCTL_UPE;
2424 goto allmulti;
2425 }
2426
2427 /*
2428 * Set the station address in the first RAL slot, and
2429 * clear the remaining slots.
2430 */
2431 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2432 for (i = 1; i < WM_RAL_TABSIZE; i++)
2433 wm_set_ral(sc, NULL, i);
2434
2435 /* Clear out the multicast table. */
2436 for (i = 0; i < WM_MC_TABSIZE; i++)
2437 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2438
2439 ETHER_FIRST_MULTI(step, ec, enm);
2440 while (enm != NULL) {
2441 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2442 /*
2443 * We must listen to a range of multicast addresses.
2444 * For now, just accept all multicasts, rather than
2445 * trying to set only those filter bits needed to match
2446 * the range. (At this time, the only use of address
2447 * ranges is for IP multicast routing, for which the
2448 * range is big enough to require all bits set.)
2449 */
2450 goto allmulti;
2451 }
2452
2453 hash = wm_mchash(sc, enm->enm_addrlo);
2454
2455 reg = (hash >> 5) & 0x7f;
2456 bit = hash & 0x1f;
2457
2458 hash = CSR_READ(sc, mta_reg + (reg << 2));
2459 hash |= 1U << bit;
2460
2461 /* XXX Hardware bug?? */
2462 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2463 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2464 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2465 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2466 } else
2467 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2468
2469 ETHER_NEXT_MULTI(step, enm);
2470 }
2471
2472 ifp->if_flags &= ~IFF_ALLMULTI;
2473 goto setit;
2474
2475 allmulti:
2476 ifp->if_flags |= IFF_ALLMULTI;
2477 sc->sc_rctl |= RCTL_MPE;
2478
2479 setit:
2480 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2481 }
2482
2483 /*
2484 * wm_tbi_mediainit:
2485 *
2486 * Initialize media for use on 1000BASE-X devices.
2487 */
2488 void
2489 wm_tbi_mediainit(struct wm_softc *sc)
2490 {
2491 const char *sep = "";
2492
2493 if (sc->sc_type < WM_T_82543)
2494 sc->sc_tipg = TIPG_WM_DFLT;
2495 else
2496 sc->sc_tipg = TIPG_LG_DFLT;
2497
2498 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2499 wm_tbi_mediastatus);
2500
2501 /*
2502 * SWD Pins:
2503 *
2504 * 0 = Link LED (output)
2505 * 1 = Loss Of Signal (input)
2506 */
2507 sc->sc_ctrl |= CTRL_SWDPIO(0);
2508 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2509
2510 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2511
2512 #define ADD(ss, mm, dd) \
2513 do { \
2514 printf("%s%s", sep, ss); \
2515 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2516 sep = ", "; \
2517 } while (/*CONSTCOND*/0)
2518
2519 printf("%s: ", sc->sc_dev.dv_xname);
2520 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2521 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2522 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2523 printf("\n");
2524
2525 #undef ADD
2526
2527 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2528 }
2529
2530 /*
2531 * wm_tbi_mediastatus: [ifmedia interface function]
2532 *
2533 * Get the current interface media status on a 1000BASE-X device.
2534 */
2535 void
2536 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2537 {
2538 struct wm_softc *sc = ifp->if_softc;
2539
2540 ifmr->ifm_status = IFM_AVALID;
2541 ifmr->ifm_active = IFM_ETHER;
2542
2543 if (sc->sc_tbi_linkup == 0) {
2544 ifmr->ifm_active |= IFM_NONE;
2545 return;
2546 }
2547
2548 ifmr->ifm_status |= IFM_ACTIVE;
2549 ifmr->ifm_active |= IFM_1000_SX;
2550 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2551 ifmr->ifm_active |= IFM_FDX;
2552 }
2553
2554 /*
2555 * wm_tbi_mediachange: [ifmedia interface function]
2556 *
2557 * Set hardware to newly-selected media on a 1000BASE-X device.
2558 */
2559 int
2560 wm_tbi_mediachange(struct ifnet *ifp)
2561 {
2562 struct wm_softc *sc = ifp->if_softc;
2563 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2564 uint32_t status;
2565 int i;
2566
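/*
 * Build the transmit config word: ifm_data carries the ANAR_X
 * ability bits stashed by the ADD() calls in wm_tbi_mediainit(),
 * the pause bits mirror our flow control settings, and TXCW_ANE
 * enables 802.3z autonegotiation.
 */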
2567 sc->sc_txcw = ife->ifm_data;
2568 if (sc->sc_ctrl & CTRL_RFCE)
2569 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2570 if (sc->sc_ctrl & CTRL_TFCE)
2571 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2572 sc->sc_txcw |= TXCW_ANE;
2573
2574 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2575 delay(10000);
2576
2577 sc->sc_tbi_anstate = 0;
2578
2579 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2580 /* Have signal; wait for the link to come up. */
2581 for (i = 0; i < 50; i++) {
2582 delay(10000);
2583 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2584 break;
2585 }
2586
2587 status = CSR_READ(sc, WMREG_STATUS);
2588 if (status & STATUS_LU) {
2589 /* Link is up. */
2590 DPRINTF(WM_DEBUG_LINK,
2591 ("%s: LINK: set media -> link up %s\n",
2592 sc->sc_dev.dv_xname,
2593 (status & STATUS_FD) ? "FDX" : "HDX"));
2594 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2595 if (status & STATUS_FD)
2596 sc->sc_tctl |=
2597 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2598 else
2599 sc->sc_tctl |=
2600 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2601 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2602 sc->sc_tbi_linkup = 1;
2603 } else {
2604 /* Link is down. */
2605 DPRINTF(WM_DEBUG_LINK,
2606 ("%s: LINK: set media -> link down\n",
2607 sc->sc_dev.dv_xname));
2608 sc->sc_tbi_linkup = 0;
2609 }
2610 } else {
2611 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2612 sc->sc_dev.dv_xname));
2613 sc->sc_tbi_linkup = 0;
2614 }
2615
2616 wm_tbi_set_linkled(sc);
2617
2618 return (0);
2619 }
2620
2621 /*
2622 * wm_tbi_set_linkled:
2623 *
2624 * Update the link LED on 1000BASE-X devices.
2625 */
2626 void
2627 wm_tbi_set_linkled(struct wm_softc *sc)
2628 {
2629
2630 if (sc->sc_tbi_linkup)
2631 sc->sc_ctrl |= CTRL_SWDPIN(0);
2632 else
2633 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2634
2635 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2636 }
2637
2638 /*
2639 * wm_tbi_check_link:
2640 *
2641 * Check the link on 1000BASE-X devices.
2642 */
2643 void
2644 wm_tbi_check_link(struct wm_softc *sc)
2645 {
2646 uint32_t rxcw, ctrl, status;
2647
2648 if (sc->sc_tbi_anstate == 0)
2649 return;
2650 else if (sc->sc_tbi_anstate > 1) {
2651 DPRINTF(WM_DEBUG_LINK,
2652 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2653 sc->sc_tbi_anstate));
2654 sc->sc_tbi_anstate--;
2655 return;
2656 }
2657
2658 sc->sc_tbi_anstate = 0;
2659
2660 rxcw = CSR_READ(sc, WMREG_RXCW);
2661 ctrl = CSR_READ(sc, WMREG_CTRL);
2662 status = CSR_READ(sc, WMREG_STATUS);
2663
2664 if ((status & STATUS_LU) == 0) {
2665 DPRINTF(WM_DEBUG_LINK,
2666 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2667 sc->sc_tbi_linkup = 0;
2668 } else {
2669 DPRINTF(WM_DEBUG_LINK,
2670 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2671 (status & STATUS_FD) ? "FDX" : "HDX"));
2672 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2673 if (status & STATUS_FD)
2674 sc->sc_tctl |=
2675 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2676 else
2677 sc->sc_tctl |=
2678 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2679 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2680 sc->sc_tbi_linkup = 1;
2681 }
2682
2683 wm_tbi_set_linkled(sc);
2684 }
2685
2686 /*
2687 * wm_gmii_reset:
2688 *
2689 * Reset the PHY.
2690 */
2691 void
2692 wm_gmii_reset(struct wm_softc *sc)
2693 {
2694 uint32_t reg;
2695
2696 if (sc->sc_type >= WM_T_82544) {
2697 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2698 delay(20000);
2699
2700 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2701 delay(20000);
2702 } else {
2703 /* The PHY reset pin is active-low. */
2704 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2705 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2706 CTRL_EXT_SWDPIN(4));
2707 reg |= CTRL_EXT_SWDPIO(4);
2708
2709 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2710 delay(10);
2711
2712 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2713 delay(10);
2714
2715 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2716 delay(10);
2717 #if 0
2718 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2719 #endif
2720 }
2721 }
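/*
 * In the pre-82544 branch above, SWDPIN(4) is configured as an output
 * and pulsed high-low-high; since the reset pin is active-low, the
 * middle 10us low period is the actual reset pulse seen by the PHY.
 */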
2722
2723 /*
2724 * wm_gmii_mediainit:
2725 *
2726 * Initialize media for use on 1000BASE-T devices.
2727 */
2728 void
2729 wm_gmii_mediainit(struct wm_softc *sc)
2730 {
2731 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2732
2733 /* We have MII. */
2734 sc->sc_flags |= WM_F_HAS_MII;
2735
2736 sc->sc_tipg = TIPG_1000T_DFLT;
2737
2738 /*
2739 * Let the chip set speed/duplex on its own based on
2740 * signals from the PHY.
2741 */
2742 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2743 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2744
2745 /* Initialize our media structures and probe the GMII. */
2746 sc->sc_mii.mii_ifp = ifp;
2747
2748 if (sc->sc_type >= WM_T_82544) {
2749 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2750 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2751 } else {
2752 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2753 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2754 }
2755 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2756
2757 wm_gmii_reset(sc);
2758
2759 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2760 wm_gmii_mediastatus);
2761
2762 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2763 MII_OFFSET_ANY, 0);
2764 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2765 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2766 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2767 } else
2768 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2769 }
2770
2771 /*
2772 * wm_gmii_mediastatus: [ifmedia interface function]
2773 *
2774 * Get the current interface media status on a 1000BASE-T device.
2775 */
2776 void
2777 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2778 {
2779 struct wm_softc *sc = ifp->if_softc;
2780
2781 mii_pollstat(&sc->sc_mii);
2782 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2783 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2784 }
2785
2786 /*
2787 * wm_gmii_mediachange: [ifmedia interface function]
2788 *
2789 * Set hardware to newly-selected media on a 1000BASE-T device.
2790 */
2791 int
2792 wm_gmii_mediachange(struct ifnet *ifp)
2793 {
2794 struct wm_softc *sc = ifp->if_softc;
2795
2796 if (ifp->if_flags & IFF_UP)
2797 mii_mediachg(&sc->sc_mii);
2798 return (0);
2799 }
2800
2801 #define MDI_IO CTRL_SWDPIN(2)
2802 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2803 #define MDI_CLK CTRL_SWDPIN(3)
2804
2805 static void
2806 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2807 {
2808 uint32_t i, v;
2809
2810 v = CSR_READ(sc, WMREG_CTRL);
2811 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2812 v |= MDI_DIR | CTRL_SWDPIO(3);
2813
2814 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2815 if (data & i)
2816 v |= MDI_IO;
2817 else
2818 v &= ~MDI_IO;
2819 CSR_WRITE(sc, WMREG_CTRL, v);
2820 delay(10);
2821 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2822 delay(10);
2823 CSR_WRITE(sc, WMREG_CTRL, v);
2824 delay(10);
2825 }
2826 }
2827
2828 static uint32_t
2829 i82543_mii_recvbits(struct wm_softc *sc)
2830 {
2831 uint32_t v, i, data = 0;
2832
2833 v = CSR_READ(sc, WMREG_CTRL);
2834 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2835 v |= CTRL_SWDPIO(3);
2836
2837 CSR_WRITE(sc, WMREG_CTRL, v);
2838 delay(10);
2839 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2840 delay(10);
2841 CSR_WRITE(sc, WMREG_CTRL, v);
2842 delay(10);
2843
2844 for (i = 0; i < 16; i++) {
2845 data <<= 1;
2846 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2847 delay(10);
2848 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2849 data |= 1;
2850 CSR_WRITE(sc, WMREG_CTRL, v);
2851 delay(10);
2852 }
2853
2854 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2855 delay(10);
2856 CSR_WRITE(sc, WMREG_CTRL, v);
2857 delay(10);
2858
2859 return (data);
2860 }
2861
2862 #undef MDI_IO
2863 #undef MDI_DIR
2864 #undef MDI_CLK
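/*
 * The helpers above bit-bang IEEE 802.3 clause 22 MDIO frames over
 * the software-definable pins.  A read frame looks like:
 *
 *	  32 bits     2    2      5       5     turnaround    16
 *	 preamble    ST   OP   PHYAD   REGAD       (Z)       DATA
 *	 (all 1s)    01   10    phy     reg                from PHY
 *
 * hence wm_gmii_i82543_readreg() below sends 32 one bits and then a
 * 14-bit command word before i82543_mii_recvbits() turns the data
 * pin around to clock the result in.
 */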
2865
2866 /*
2867 * wm_gmii_i82543_readreg: [mii interface function]
2868 *
2869 * Read a PHY register on the GMII (i82543 version).
2870 */
2871 int
2872 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2873 {
2874 struct wm_softc *sc = (void *) self;
2875 int rv;
2876
2877 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2878 i82543_mii_sendbits(sc, reg | (phy << 5) |
2879 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2880 rv = i82543_mii_recvbits(sc) & 0xffff;
2881
2882 DPRINTF(WM_DEBUG_GMII,
2883 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2884 sc->sc_dev.dv_xname, phy, reg, rv));
2885
2886 return (rv);
2887 }
2888
2889 /*
2890 * wm_gmii_i82543_writereg: [mii interface function]
2891 *
2892 * Write a PHY register on the GMII (i82543 version).
2893 */
2894 void
2895 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2896 {
2897 struct wm_softc *sc = (void *) self;
2898
2899 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2900 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2901 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2902 (MII_COMMAND_START << 30), 32);
2903 }
2904
2905 /*
2906 * wm_gmii_i82544_readreg: [mii interface function]
2907 *
2908 * Read a PHY register on the GMII.
2909 */
2910 int
2911 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2912 {
2913 struct wm_softc *sc = (void *) self;
2914 uint32_t mdic;
2915 int i, rv;
2916
2917 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2918 MDIC_REGADD(reg));
2919
2920 for (i = 0; i < 100; i++) {
2921 mdic = CSR_READ(sc, WMREG_MDIC);
2922 if (mdic & MDIC_READY)
2923 break;
2924 delay(10);
2925 }
2926
2927 if ((mdic & MDIC_READY) == 0) {
2928 printf("%s: MDIC read timed out: phy %d reg %d\n",
2929 sc->sc_dev.dv_xname, phy, reg);
2930 rv = 0;
2931 } else if (mdic & MDIC_E) {
2932 #if 0 /* This is normal if no PHY is present. */
2933 printf("%s: MDIC read error: phy %d reg %d\n",
2934 sc->sc_dev.dv_xname, phy, reg);
2935 #endif
2936 rv = 0;
2937 } else {
2938 rv = MDIC_DATA(mdic);
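/*
 * All-ones data is what an idle (pulled-up) MDIO line reads
 * back when no PHY responds, so treat it as a failed read.
 */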
2939 if (rv == 0xffff)
2940 rv = 0;
2941 }
2942
2943 return (rv);
2944 }
2945
2946 /*
2947 * wm_gmii_i82544_writereg: [mii interface function]
2948 *
2949 * Write a PHY register on the GMII.
2950 */
2951 void
2952 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2953 {
2954 struct wm_softc *sc = (void *) self;
2955 uint32_t mdic;
2956 int i;
2957
2958 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2959 MDIC_REGADD(reg) | MDIC_DATA(val));
2960
2961 for (i = 0; i < 100; i++) {
2962 mdic = CSR_READ(sc, WMREG_MDIC);
2963 if (mdic & MDIC_READY)
2964 break;
2965 delay(10);
2966 }
2967
2968 if ((mdic & MDIC_READY) == 0)
2969 printf("%s: MDIC write timed out: phy %d reg %d\n",
2970 sc->sc_dev.dv_xname, phy, reg);
2971 else if (mdic & MDIC_E)
2972 printf("%s: MDIC write error: phy %d reg %d\n",
2973 sc->sc_dev.dv_xname, phy, reg);
2974 }
2975
2976 /*
2977 * wm_gmii_statchg: [mii interface function]
2978 *
2979 * Callback from MII layer when media changes.
2980 */
2981 void
2982 wm_gmii_statchg(struct device *self)
2983 {
2984 struct wm_softc *sc = (void *) self;
2985
2986 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2987
2988 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2989 DPRINTF(WM_DEBUG_LINK,
2990 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2991 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2992 } else {
2993 DPRINTF(WM_DEBUG_LINK,
2994 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2995 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2996 }
2997
2998 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2999 }
3000