/*	$NetBSD: if_wm.c,v 1.39 2003/07/29 19:49:50 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.39 2003/07/29 19:49:50 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
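
/*
 * Illustrative example (not from the original source): both ring sizes
 * are powers of two, so the next/previous-index macros above can wrap
 * with a cheap AND instead of a modulo, e.g.:
 *
 *	WM_NEXTTX(255) == (255 + 1) & WM_NTXDESC_MASK == 0
 *	WM_PREVRX(0)   == (0 - 1) & WM_NRXDESC_MASK   == 255
 */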

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
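
/*
 * For example, the Rx descriptors sit immediately after the Tx ring
 * in the clump, so (assuming the usual 16-byte wiseman descriptor
 * layout from if_wmreg.h):
 *
 *	WM_CDTXOFF(0) == 0
 *	WM_CDRXOFF(0) == WM_NTXDESC * sizeof(wiseman_txdesc_t) == 4096
 */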

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
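
/*
 * Usage sketch (mirroring wm_rxintr() below): each completed Rx buffer
 * is linked onto the chain, and at end-of-packet the chain is
 * terminated, handed up, and reset:
 *
 *	WM_RXCHAIN_LINK(sc, m);		<- one call per Rx buffer
 *	...
 *	*sc->sc_rxtailp = NULL;		<- at EOP: terminate the chain
 *	m = sc->sc_rxhead;
 *	WM_RXCHAIN_RESET(sc);
 */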

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
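
/*
 * For example, WM_CDTXSYNC(sc, 254, 4, ops) issues two
 * bus_dmamap_sync() calls: one covering descriptors 254..255 at the
 * end of the ring, and one covering descriptors 0..1 after the wrap.
 */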

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the	\
	 * 2K size option, but what we REALLY want is (2K - 2)!  For	\
	 * this reason, we can't accept packets longer than the	\
	 * standard Ethernet MTU, without incurring a big penalty to	\
	 * copy every incoming packet to a new, suitably aligned	\
	 * buffer.							\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
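
/*
 * The 2-byte fudge above works out because the Ethernet header is
 * 14 bytes long: starting the frame at ext_buf + 2 puts the end of
 * the header at offset 16, leaving the IP header 4-byte aligned.
 */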

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
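
	/*
	 * For example (illustrative address, not from any real EEPROM):
	 * the EEPROM stores three little-endian 16-bit words, so
	 * myea[] = { 0x1100, 0x3322, 0x5544 } unpacks to the station
	 * address 00:11:22:33:44:55.
	 */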

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

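/*
 * Note on the context cache used below: the most recently loaded IP
 * and TCP/UDP checksum contexts are remembered in sc_txctx_ipcs and
 * sc_txctx_tucs.  wm_init() seeds both with the sentinel 0xffffffff
 * ("no context loaded"), so the first checksummed packet after init
 * always emits a fresh context descriptor.
 */
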
/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
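
/*
 * Worst case, the reset above takes the unconditional 10ms delay
 * plus up to 1000 * 20us of polling, i.e. roughly 30ms, before the
 * failure warning is printed.
 */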
1930
1931 /*
1932 * wm_init: [ifnet interface function]
1933 *
1934 * Initialize the interface. Must be called at splnet().
1935 */
1936 int
1937 wm_init(struct ifnet *ifp)
1938 {
1939 struct wm_softc *sc = ifp->if_softc;
1940 struct wm_rxsoft *rxs;
1941 int i, error = 0;
1942 uint32_t reg;
1943
1944 /* Cancel any pending I/O. */
1945 wm_stop(ifp, 0);
1946
1947 /* Reset the chip to a known state. */
1948 wm_reset(sc);
1949
1950 /* Initialize the transmit descriptor ring. */
1951 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1952 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1953 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1954 sc->sc_txfree = WM_NTXDESC;
1955 sc->sc_txnext = 0;
1956
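	/* Invalidate the cached Tx checksum offload contexts. */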
1957 sc->sc_txctx_ipcs = 0xffffffff;
1958 sc->sc_txctx_tucs = 0xffffffff;
1959
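	/*
	 * Give the transmit ring to the chip.  Pre-i82543 chips have
	 * the descriptor queue registers at different offsets.
	 */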
1960 if (sc->sc_type < WM_T_82543) {
1961 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1962 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1963 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1964 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1965 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1966 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1967 } else {
1968 CSR_WRITE(sc, WMREG_TBDAH, 0);
1969 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1970 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1971 CSR_WRITE(sc, WMREG_TDH, 0);
1972 CSR_WRITE(sc, WMREG_TDT, 0);
1973 CSR_WRITE(sc, WMREG_TIDV, 128);
1974
1975 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1976 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1977 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1978 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1979 }
1980 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1981 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1982
1983 /* Initialize the transmit job descriptors. */
1984 for (i = 0; i < WM_TXQUEUELEN; i++)
1985 sc->sc_txsoft[i].txs_mbuf = NULL;
1986 sc->sc_txsfree = WM_TXQUEUELEN;
1987 sc->sc_txsnext = 0;
1988 sc->sc_txsdirty = 0;
1989
1990 /*
1991 * Initialize the receive descriptor and receive job
1992 * descriptor rings.
1993 */
1994 if (sc->sc_type < WM_T_82543) {
1995 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1996 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1997 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1998 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1999 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2000 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2001
2002 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2003 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2004 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2005 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2006 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2007 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2008 } else {
2009 CSR_WRITE(sc, WMREG_RDBAH, 0);
2010 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2011 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2012 CSR_WRITE(sc, WMREG_RDH, 0);
2013 CSR_WRITE(sc, WMREG_RDT, 0);
2014 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2015 }
2016 for (i = 0; i < WM_NRXDESC; i++) {
2017 rxs = &sc->sc_rxsoft[i];
2018 if (rxs->rxs_mbuf == NULL) {
2019 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2020 printf("%s: unable to allocate or map rx "
2021 "buffer %d, error = %d\n",
2022 sc->sc_dev.dv_xname, i, error);
2023 /*
2024 * XXX Should attempt to run with fewer receive
2025 * XXX buffers instead of just failing.
2026 */
2027 wm_rxdrain(sc);
2028 goto out;
2029 }
2030 } else
2031 WM_INIT_RXDESC(sc, i);
2032 }
2033 sc->sc_rxptr = 0;
2034 sc->sc_rxdiscard = 0;
2035 WM_RXCHAIN_RESET(sc);
2036
2037 /*
2038 * Clear out the VLAN table -- we don't use it (yet).
2039 */
2040 CSR_WRITE(sc, WMREG_VET, 0);
2041 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2042 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2043
2044 /*
2045 * Set up flow-control parameters.
2046 *
2047 * XXX Values could probably stand some tuning.
2048 */
2049 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2050 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2051 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2052 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2053
2054 if (sc->sc_type < WM_T_82543) {
2055 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2056 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2057 } else {
2058 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2059 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2060 }
2061 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2062 }
2063
2064 #if 0 /* XXXJRT */
2065 /* Deal with VLAN enables. */
2066 if (sc->sc_ethercom.ec_nvlans != 0)
2067 sc->sc_ctrl |= CTRL_VME;
2068 else
2069 #endif /* XXXJRT */
2070 sc->sc_ctrl &= ~CTRL_VME;
2071
2072 /* Write the control registers. */
2073 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2074 #if 0
2075 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2076 #endif
2077
2078 /*
2079 * Set up checksum offload parameters.
2080 */
2081 reg = CSR_READ(sc, WMREG_RXCSUM);
2082 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2083 reg |= RXCSUM_IPOFL;
2084 else
2085 reg &= ~RXCSUM_IPOFL;
2086 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2087 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2088 else {
2089 reg &= ~RXCSUM_TUOFL;
2090 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2091 reg &= ~RXCSUM_IPOFL;
2092 }
2093 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2094
2095 /*
2096 * Set up the interrupt registers.
2097 */
2098 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
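	/*
	 * Enable: Tx descriptor written back, link status change,
	 * Rx sequence error, Rx descriptor minimum threshold,
	 * Rx overrun, and Rx timer interrupts.
	 */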
2099 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2100 ICR_RXO | ICR_RXT0;
2101 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2102 sc->sc_icr |= ICR_RXCFG;
2103 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2104
2105 /* Set up the inter-packet gap. */
2106 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2107
2108 #if 0 /* XXXJRT */
2109 /* Set the VLAN ethernetype. */
2110 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2111 #endif
2112
2113 /*
2114 * Set up the transmit control register; we start out with
2115 	 * a collision distance suitable for FDX, but update it when
2116 * we resolve the media type.
2117 */
2118 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2119 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2120 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2121
2122 /* Set the media. */
2123 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2124
2125 /*
2126 * Set up the receive control register; we actually program
2127 * the register when we set the receive filter. Use multicast
2128 * address offset type 0.
2129 *
2130 * Only the i82544 has the ability to strip the incoming
2131 * CRC, so we don't enable that feature.
2132 */
2133 sc->sc_mchash_type = 0;
2134 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2135 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2136
2137 /* Set the receive filter. */
2138 wm_set_filter(sc);
2139
2140 /* Start the one second link check clock. */
2141 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2142
2143 /* ...all done! */
2144 ifp->if_flags |= IFF_RUNNING;
2145 ifp->if_flags &= ~IFF_OACTIVE;
2146
2147 out:
2148 if (error)
2149 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2150 return (error);
2151 }
2152
2153 /*
2154 * wm_rxdrain:
2155 *
2156 * Drain the receive queue.
2157 */
2158 void
2159 wm_rxdrain(struct wm_softc *sc)
2160 {
2161 struct wm_rxsoft *rxs;
2162 int i;
2163
2164 for (i = 0; i < WM_NRXDESC; i++) {
2165 rxs = &sc->sc_rxsoft[i];
2166 if (rxs->rxs_mbuf != NULL) {
2167 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2168 m_freem(rxs->rxs_mbuf);
2169 rxs->rxs_mbuf = NULL;
2170 }
2171 }
2172 }
2173
2174 /*
2175 * wm_stop: [ifnet interface function]
2176 *
2177 * Stop transmission on the interface.
2178 */
2179 void
2180 wm_stop(struct ifnet *ifp, int disable)
2181 {
2182 struct wm_softc *sc = ifp->if_softc;
2183 struct wm_txsoft *txs;
2184 int i;
2185
2186 /* Stop the one second clock. */
2187 callout_stop(&sc->sc_tick_ch);
2188
2189 if (sc->sc_flags & WM_F_HAS_MII) {
2190 /* Down the MII. */
2191 mii_down(&sc->sc_mii);
2192 }
2193
2194 /* Stop the transmit and receive processes. */
2195 CSR_WRITE(sc, WMREG_TCTL, 0);
2196 CSR_WRITE(sc, WMREG_RCTL, 0);
2197
2198 /* Release any queued transmit buffers. */
2199 for (i = 0; i < WM_TXQUEUELEN; i++) {
2200 txs = &sc->sc_txsoft[i];
2201 if (txs->txs_mbuf != NULL) {
2202 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2203 m_freem(txs->txs_mbuf);
2204 txs->txs_mbuf = NULL;
2205 }
2206 }
2207
2208 if (disable)
2209 wm_rxdrain(sc);
2210
2211 /* Mark the interface as down and cancel the watchdog timer. */
2212 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2213 ifp->if_timer = 0;
2214 }
2215
2216 /*
2217 * wm_read_eeprom:
2218 *
2219 * Read data from the serial EEPROM.
2220 */
2221 void
2222 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2223 {
2224 uint32_t reg;
2225 int i, x, addrbits = 6;
2226
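	/*
	 * The EEPROM is a Microwire device: for each word, bit-bang
	 * a READ opcode and a 6- or 8-bit word address out on EECD_DI,
	 * then shift the 16 data bits back in on EECD_DO.
	 */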
2227 for (i = 0; i < wordcnt; i++) {
2228 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2229 reg = CSR_READ(sc, WMREG_EECD);
2230
2231 /* Get number of address bits. */
2232 if (reg & EECD_EE_SIZE)
2233 addrbits = 8;
2234
2235 /* Request EEPROM access. */
2236 reg |= EECD_EE_REQ;
2237 CSR_WRITE(sc, WMREG_EECD, reg);
2238
2239 			/* ...and wait for it to be granted. */
2240 for (x = 0; x < 100; x++) {
2241 reg = CSR_READ(sc, WMREG_EECD);
2242 if (reg & EECD_EE_GNT)
2243 break;
2244 delay(5);
2245 }
2246 if ((reg & EECD_EE_GNT) == 0) {
2247 printf("%s: could not acquire EEPROM GNT\n",
2248 sc->sc_dev.dv_xname);
2249 *data = 0xffff;
2250 reg &= ~EECD_EE_REQ;
2251 CSR_WRITE(sc, WMREG_EECD, reg);
2252 continue;
2253 }
2254 } else
2255 reg = 0;
2256
2257 /* Clear SK and DI. */
2258 reg &= ~(EECD_SK | EECD_DI);
2259 CSR_WRITE(sc, WMREG_EECD, reg);
2260
2261 /* Set CHIP SELECT. */
2262 reg |= EECD_CS;
2263 CSR_WRITE(sc, WMREG_EECD, reg);
2264 delay(2);
2265
2266 /* Shift in the READ command. */
2267 for (x = 3; x > 0; x--) {
2268 if (UWIRE_OPC_READ & (1 << (x - 1)))
2269 reg |= EECD_DI;
2270 else
2271 reg &= ~EECD_DI;
2272 CSR_WRITE(sc, WMREG_EECD, reg);
2273 delay(2);
2274 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2275 delay(2);
2276 CSR_WRITE(sc, WMREG_EECD, reg);
2277 delay(2);
2278 }
2279
2280 /* Shift in address. */
2281 for (x = addrbits; x > 0; x--) {
2282 if ((word + i) & (1 << (x - 1)))
2283 reg |= EECD_DI;
2284 else
2285 reg &= ~EECD_DI;
2286 CSR_WRITE(sc, WMREG_EECD, reg);
2287 delay(2);
2288 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2289 delay(2);
2290 CSR_WRITE(sc, WMREG_EECD, reg);
2291 delay(2);
2292 }
2293
2294 /* Shift out the data. */
2295 reg &= ~EECD_DI;
2296 data[i] = 0;
2297 for (x = 16; x > 0; x--) {
2298 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2299 delay(2);
2300 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2301 data[i] |= (1 << (x - 1));
2302 CSR_WRITE(sc, WMREG_EECD, reg);
2303 delay(2);
2304 }
2305
2306 /* Clear CHIP SELECT. */
2307 reg &= ~EECD_CS;
2308 CSR_WRITE(sc, WMREG_EECD, reg);
2309 delay(2);
2310
2311 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2312 /* Release the EEPROM. */
2313 reg &= ~EECD_EE_REQ;
2314 CSR_WRITE(sc, WMREG_EECD, reg);
2315 }
2316 }
2317 }
2318
2319 /*
2320 * wm_add_rxbuf:
2321 *
2322  *	Add a receive buffer to the indicated descriptor.
2323 */
2324 int
2325 wm_add_rxbuf(struct wm_softc *sc, int idx)
2326 {
2327 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2328 struct mbuf *m;
2329 int error;
2330
2331 MGETHDR(m, M_DONTWAIT, MT_DATA);
2332 if (m == NULL)
2333 return (ENOBUFS);
2334
2335 MCLGET(m, M_DONTWAIT);
2336 if ((m->m_flags & M_EXT) == 0) {
2337 m_freem(m);
2338 return (ENOBUFS);
2339 }
2340
2341 if (rxs->rxs_mbuf != NULL)
2342 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2343
2344 rxs->rxs_mbuf = m;
2345
2346 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2347 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2348 BUS_DMA_READ|BUS_DMA_NOWAIT);
2349 if (error) {
2350 printf("%s: unable to load rx DMA map %d, error = %d\n",
2351 sc->sc_dev.dv_xname, idx, error);
2352 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2353 }
2354
2355 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2356 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2357
2358 WM_INIT_RXDESC(sc, idx);
2359
2360 return (0);
2361 }
2362
2363 /*
2364 * wm_set_ral:
2365 *
2366  *	Set an entry in the receive address list.
2367 */
2368 static void
2369 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2370 {
2371 uint32_t ral_lo, ral_hi;
2372
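	/*
	 * RAL_LO holds the first four bytes of the address; RAL_HI
	 * holds the last two, plus the address-valid bit.
	 */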
2373 if (enaddr != NULL) {
2374 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2375 (enaddr[3] << 24);
2376 ral_hi = enaddr[4] | (enaddr[5] << 8);
2377 ral_hi |= RAL_AV;
2378 } else {
2379 ral_lo = 0;
2380 ral_hi = 0;
2381 }
2382
2383 if (sc->sc_type >= WM_T_82544) {
2384 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2385 ral_lo);
2386 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2387 ral_hi);
2388 } else {
2389 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2390 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2391 }
2392 }
2393
2394 /*
2395 * wm_mchash:
2396 *
2397 * Compute the hash of the multicast address for the 4096-bit
2398 * multicast filter.
2399 */
2400 static uint32_t
2401 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2402 {
2403 static const int lo_shift[4] = { 4, 3, 2, 0 };
2404 static const int hi_shift[4] = { 4, 5, 6, 8 };
2405 uint32_t hash;
2406
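	/*
	 * The 12-bit hash is formed from bits of the last two bytes
	 * of the address; the multicast offset type selects which
	 * window of those bits is used.
	 */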
2407 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2408 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2409
2410 return (hash & 0xfff);
2411 }
2412
2413 /*
2414 * wm_set_filter:
2415 *
2416 * Set up the receive filter.
2417 */
2418 void
2419 wm_set_filter(struct wm_softc *sc)
2420 {
2421 struct ethercom *ec = &sc->sc_ethercom;
2422 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2423 struct ether_multi *enm;
2424 struct ether_multistep step;
2425 bus_addr_t mta_reg;
2426 uint32_t hash, reg, bit;
2427 int i;
2428
2429 if (sc->sc_type >= WM_T_82544)
2430 mta_reg = WMREG_CORDOVA_MTA;
2431 else
2432 mta_reg = WMREG_MTA;
2433
2434 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2435
2436 if (ifp->if_flags & IFF_BROADCAST)
2437 sc->sc_rctl |= RCTL_BAM;
2438 if (ifp->if_flags & IFF_PROMISC) {
2439 sc->sc_rctl |= RCTL_UPE;
2440 goto allmulti;
2441 }
2442
2443 /*
2444 * Set the station address in the first RAL slot, and
2445 * clear the remaining slots.
2446 */
2447 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2448 for (i = 1; i < WM_RAL_TABSIZE; i++)
2449 wm_set_ral(sc, NULL, i);
2450
2451 /* Clear out the multicast table. */
2452 for (i = 0; i < WM_MC_TABSIZE; i++)
2453 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2454
2455 ETHER_FIRST_MULTI(step, ec, enm);
2456 while (enm != NULL) {
2457 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2458 /*
2459 * We must listen to a range of multicast addresses.
2460 * For now, just accept all multicasts, rather than
2461 * trying to set only those filter bits needed to match
2462 * the range. (At this time, the only use of address
2463 * ranges is for IP multicast routing, for which the
2464 * range is big enough to require all bits set.)
2465 */
2466 goto allmulti;
2467 }
2468
2469 hash = wm_mchash(sc, enm->enm_addrlo);
2470
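		/*
		 * The 4096-bit table is an array of 128 32-bit
		 * registers; bits 11:5 of the hash select the
		 * register, bits 4:0 select the bit within it.
		 */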
2471 reg = (hash >> 5) & 0x7f;
2472 bit = hash & 0x1f;
2473
2474 hash = CSR_READ(sc, mta_reg + (reg << 2));
2475 hash |= 1U << bit;
2476
2477 /* XXX Hardware bug?? */
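		/*
		 * Writes to some MTA slots on the 82544 appear to
		 * disturb the previous slot, so save its contents
		 * and re-write them after updating this one.
		 */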
2478 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2479 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2480 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2481 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2482 } else
2483 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2484
2485 ETHER_NEXT_MULTI(step, enm);
2486 }
2487
2488 ifp->if_flags &= ~IFF_ALLMULTI;
2489 goto setit;
2490
2491 allmulti:
2492 ifp->if_flags |= IFF_ALLMULTI;
2493 sc->sc_rctl |= RCTL_MPE;
2494
2495 setit:
2496 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2497 }
2498
2499 /*
2500 * wm_tbi_mediainit:
2501 *
2502 * Initialize media for use on 1000BASE-X devices.
2503 */
2504 void
2505 wm_tbi_mediainit(struct wm_softc *sc)
2506 {
2507 const char *sep = "";
2508
2509 if (sc->sc_type < WM_T_82543)
2510 sc->sc_tipg = TIPG_WM_DFLT;
2511 else
2512 sc->sc_tipg = TIPG_LG_DFLT;
2513
2514 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2515 wm_tbi_mediastatus);
2516
2517 /*
2518 * SWD Pins:
2519 *
2520 * 0 = Link LED (output)
2521 * 1 = Loss Of Signal (input)
2522 */
2523 sc->sc_ctrl |= CTRL_SWDPIO(0);
2524 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2525
2526 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2527
2528 #define ADD(ss, mm, dd) \
2529 do { \
2530 printf("%s%s", sep, ss); \
2531 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2532 sep = ", "; \
2533 } while (/*CONSTCOND*/0)
2534
2535 printf("%s: ", sc->sc_dev.dv_xname);
2536 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2537 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2538 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2539 printf("\n");
2540
2541 #undef ADD
2542
2543 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2544 }
2545
2546 /*
2547 * wm_tbi_mediastatus: [ifmedia interface function]
2548 *
2549 * Get the current interface media status on a 1000BASE-X device.
2550 */
2551 void
2552 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2553 {
2554 struct wm_softc *sc = ifp->if_softc;
2555
2556 ifmr->ifm_status = IFM_AVALID;
2557 ifmr->ifm_active = IFM_ETHER;
2558
2559 if (sc->sc_tbi_linkup == 0) {
2560 ifmr->ifm_active |= IFM_NONE;
2561 return;
2562 }
2563
2564 ifmr->ifm_status |= IFM_ACTIVE;
2565 ifmr->ifm_active |= IFM_1000_SX;
2566 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2567 ifmr->ifm_active |= IFM_FDX;
2568 }
2569
2570 /*
2571 * wm_tbi_mediachange: [ifmedia interface function]
2572 *
2573 * Set hardware to newly-selected media on a 1000BASE-X device.
2574 */
2575 int
2576 wm_tbi_mediachange(struct ifnet *ifp)
2577 {
2578 struct wm_softc *sc = ifp->if_softc;
2579 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2580 uint32_t status;
2581 int i;
2582
2583 sc->sc_txcw = ife->ifm_data;
2584 if (sc->sc_ctrl & CTRL_RFCE)
2585 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2586 if (sc->sc_ctrl & CTRL_TFCE)
2587 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2588 sc->sc_txcw |= TXCW_ANE;
2589
2590 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2591 delay(10000);
2592
2593 sc->sc_tbi_anstate = 0;
2594
2595 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2596 /* Have signal; wait for the link to come up. */
2597 for (i = 0; i < 50; i++) {
2598 delay(10000);
2599 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2600 break;
2601 }
2602
2603 status = CSR_READ(sc, WMREG_STATUS);
2604 if (status & STATUS_LU) {
2605 /* Link is up. */
2606 DPRINTF(WM_DEBUG_LINK,
2607 ("%s: LINK: set media -> link up %s\n",
2608 sc->sc_dev.dv_xname,
2609 (status & STATUS_FD) ? "FDX" : "HDX"));
2610 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2611 if (status & STATUS_FD)
2612 sc->sc_tctl |=
2613 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2614 else
2615 sc->sc_tctl |=
2616 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2617 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2618 sc->sc_tbi_linkup = 1;
2619 } else {
2620 /* Link is down. */
2621 DPRINTF(WM_DEBUG_LINK,
2622 ("%s: LINK: set media -> link down\n",
2623 sc->sc_dev.dv_xname));
2624 sc->sc_tbi_linkup = 0;
2625 }
2626 } else {
2627 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2628 sc->sc_dev.dv_xname));
2629 sc->sc_tbi_linkup = 0;
2630 }
2631
2632 wm_tbi_set_linkled(sc);
2633
2634 return (0);
2635 }
2636
2637 /*
2638 * wm_tbi_set_linkled:
2639 *
2640 * Update the link LED on 1000BASE-X devices.
2641 */
2642 void
2643 wm_tbi_set_linkled(struct wm_softc *sc)
2644 {
2645
2646 if (sc->sc_tbi_linkup)
2647 sc->sc_ctrl |= CTRL_SWDPIN(0);
2648 else
2649 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2650
2651 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2652 }
2653
2654 /*
2655 * wm_tbi_check_link:
2656 *
2657 * Check the link on 1000BASE-X devices.
2658 */
2659 void
2660 wm_tbi_check_link(struct wm_softc *sc)
2661 {
2662 uint32_t rxcw, ctrl, status;
2663
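	/*
	 * sc_tbi_anstate counts down ticks after a link event:
	 * 0 means no check is pending; values above 1 mean wait
	 * a little longer before sampling the link state.
	 */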
2664 if (sc->sc_tbi_anstate == 0)
2665 return;
2666 else if (sc->sc_tbi_anstate > 1) {
2667 DPRINTF(WM_DEBUG_LINK,
2668 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2669 sc->sc_tbi_anstate));
2670 sc->sc_tbi_anstate--;
2671 return;
2672 }
2673
2674 sc->sc_tbi_anstate = 0;
2675
2676 rxcw = CSR_READ(sc, WMREG_RXCW);
2677 ctrl = CSR_READ(sc, WMREG_CTRL);
2678 status = CSR_READ(sc, WMREG_STATUS);
2679
2680 if ((status & STATUS_LU) == 0) {
2681 DPRINTF(WM_DEBUG_LINK,
2682 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2683 sc->sc_tbi_linkup = 0;
2684 } else {
2685 DPRINTF(WM_DEBUG_LINK,
2686 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2687 (status & STATUS_FD) ? "FDX" : "HDX"));
2688 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2689 if (status & STATUS_FD)
2690 sc->sc_tctl |=
2691 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2692 else
2693 sc->sc_tctl |=
2694 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2695 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2696 sc->sc_tbi_linkup = 1;
2697 }
2698
2699 wm_tbi_set_linkled(sc);
2700 }
2701
2702 /*
2703 * wm_gmii_reset:
2704 *
2705 * Reset the PHY.
2706 */
2707 void
2708 wm_gmii_reset(struct wm_softc *sc)
2709 {
2710 uint32_t reg;
2711
2712 if (sc->sc_type >= WM_T_82544) {
2713 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2714 delay(20000);
2715
2716 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2717 delay(20000);
2718 } else {
2719 /* The PHY reset pin is active-low. */
2720 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2721 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2722 CTRL_EXT_SWDPIN(4));
2723 reg |= CTRL_EXT_SWDPIO(4);
2724
2725 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2726 delay(10);
2727
2728 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2729 delay(10);
2730
2731 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2732 delay(10);
2733 #if 0
2734 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2735 #endif
2736 }
2737 }
2738
2739 /*
2740 * wm_gmii_mediainit:
2741 *
2742 * Initialize media for use on 1000BASE-T devices.
2743 */
2744 void
2745 wm_gmii_mediainit(struct wm_softc *sc)
2746 {
2747 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2748
2749 /* We have MII. */
2750 sc->sc_flags |= WM_F_HAS_MII;
2751
2752 sc->sc_tipg = TIPG_1000T_DFLT;
2753
2754 /*
2755 * Let the chip set speed/duplex on its own based on
2756 * signals from the PHY.
2757 */
2758 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2759 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2760
2761 /* Initialize our media structures and probe the GMII. */
2762 sc->sc_mii.mii_ifp = ifp;
2763
2764 if (sc->sc_type >= WM_T_82544) {
2765 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2766 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2767 } else {
2768 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2769 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2770 }
2771 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2772
2773 wm_gmii_reset(sc);
2774
2775 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2776 wm_gmii_mediastatus);
2777
2778 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2779 MII_OFFSET_ANY, 0);
2780 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2781 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2782 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2783 } else
2784 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2785 }
2786
2787 /*
2788 * wm_gmii_mediastatus: [ifmedia interface function]
2789 *
2790 * Get the current interface media status on a 1000BASE-T device.
2791 */
2792 void
2793 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2794 {
2795 struct wm_softc *sc = ifp->if_softc;
2796
2797 mii_pollstat(&sc->sc_mii);
2798 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2799 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2800 }
2801
2802 /*
2803 * wm_gmii_mediachange: [ifmedia interface function]
2804 *
2805 * Set hardware to newly-selected media on a 1000BASE-T device.
2806 */
2807 int
2808 wm_gmii_mediachange(struct ifnet *ifp)
2809 {
2810 struct wm_softc *sc = ifp->if_softc;
2811
2812 if (ifp->if_flags & IFF_UP)
2813 mii_mediachg(&sc->sc_mii);
2814 return (0);
2815 }
2816
2817 #define MDI_IO CTRL_SWDPIN(2)
2818 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2819 #define MDI_CLK CTRL_SWDPIN(3)
2820
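/*
 * i82543_mii_sendbits:
 *
 *	Clock out `nbits' bits of `data' (MSB first) on the bit-banged
 *	MDIO interface; each bit is placed on MDI_IO and latched by a
 *	pulse on MDI_CLK.
 */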
2821 static void
2822 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2823 {
2824 uint32_t i, v;
2825
2826 v = CSR_READ(sc, WMREG_CTRL);
2827 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2828 v |= MDI_DIR | CTRL_SWDPIO(3);
2829
2830 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2831 if (data & i)
2832 v |= MDI_IO;
2833 else
2834 v &= ~MDI_IO;
2835 CSR_WRITE(sc, WMREG_CTRL, v);
2836 delay(10);
2837 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2838 delay(10);
2839 CSR_WRITE(sc, WMREG_CTRL, v);
2840 delay(10);
2841 }
2842 }
2843
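/*
 * i82543_mii_recvbits:
 *
 *	Float MDI_IO, clock through the turnaround cycle, then shift
 *	in 16 bits (MSB first) from the PHY, sampling MDI_IO while
 *	MDI_CLK is high, and finish with one idle clock cycle.
 */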
2844 static uint32_t
2845 i82543_mii_recvbits(struct wm_softc *sc)
2846 {
2847 uint32_t v, i, data = 0;
2848
2849 v = CSR_READ(sc, WMREG_CTRL);
2850 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2851 v |= CTRL_SWDPIO(3);
2852
2853 CSR_WRITE(sc, WMREG_CTRL, v);
2854 delay(10);
2855 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2856 delay(10);
2857 CSR_WRITE(sc, WMREG_CTRL, v);
2858 delay(10);
2859
2860 for (i = 0; i < 16; i++) {
2861 data <<= 1;
2862 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2863 delay(10);
2864 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2865 data |= 1;
2866 CSR_WRITE(sc, WMREG_CTRL, v);
2867 delay(10);
2868 }
2869
2870 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2871 delay(10);
2872 CSR_WRITE(sc, WMREG_CTRL, v);
2873 delay(10);
2874
2875 return (data);
2876 }
2877
2878 #undef MDI_IO
2879 #undef MDI_DIR
2880 #undef MDI_CLK
2881
2882 /*
2883 * wm_gmii_i82543_readreg: [mii interface function]
2884 *
2885 * Read a PHY register on the GMII (i82543 version).
2886 */
2887 int
2888 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2889 {
2890 struct wm_softc *sc = (void *) self;
2891 int rv;
2892
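	/*
	 * Send the 32-bit preamble, then the 14-bit read command:
	 * start (2 bits), read opcode (2 bits), PHY address (5 bits),
	 * register address (5 bits).
	 */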
2893 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2894 i82543_mii_sendbits(sc, reg | (phy << 5) |
2895 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2896 rv = i82543_mii_recvbits(sc) & 0xffff;
2897
2898 DPRINTF(WM_DEBUG_GMII,
2899 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2900 sc->sc_dev.dv_xname, phy, reg, rv));
2901
2902 return (rv);
2903 }
2904
2905 /*
2906 * wm_gmii_i82543_writereg: [mii interface function]
2907 *
2908 * Write a PHY register on the GMII (i82543 version).
2909 */
2910 void
2911 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2912 {
2913 struct wm_softc *sc = (void *) self;
2914
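	/*
	 * Send the 32-bit preamble, then the 32-bit write command:
	 * start (2 bits), write opcode (2 bits), PHY address (5 bits),
	 * register address (5 bits), turnaround (2 bits), and 16 bits
	 * of data.
	 */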
2915 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2916 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2917 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2918 (MII_COMMAND_START << 30), 32);
2919 }
2920
2921 /*
2922 * wm_gmii_i82544_readreg: [mii interface function]
2923 *
2924 * Read a PHY register on the GMII.
2925 */
2926 int
2927 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2928 {
2929 struct wm_softc *sc = (void *) self;
2930 uint32_t mdic;
2931 int i, rv;
2932
2933 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2934 MDIC_REGADD(reg));
2935
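	/* Poll (up to ~1ms) for the MDI cycle to complete. */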
2936 for (i = 0; i < 100; i++) {
2937 mdic = CSR_READ(sc, WMREG_MDIC);
2938 if (mdic & MDIC_READY)
2939 break;
2940 delay(10);
2941 }
2942
2943 if ((mdic & MDIC_READY) == 0) {
2944 printf("%s: MDIC read timed out: phy %d reg %d\n",
2945 sc->sc_dev.dv_xname, phy, reg);
2946 rv = 0;
2947 } else if (mdic & MDIC_E) {
2948 #if 0 /* This is normal if no PHY is present. */
2949 printf("%s: MDIC read error: phy %d reg %d\n",
2950 sc->sc_dev.dv_xname, phy, reg);
2951 #endif
2952 rv = 0;
2953 } else {
2954 rv = MDIC_DATA(mdic);
2955 if (rv == 0xffff)
2956 rv = 0;
2957 }
2958
2959 return (rv);
2960 }
2961
2962 /*
2963 * wm_gmii_i82544_writereg: [mii interface function]
2964 *
2965 * Write a PHY register on the GMII.
2966 */
2967 void
2968 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2969 {
2970 struct wm_softc *sc = (void *) self;
2971 uint32_t mdic;
2972 int i;
2973
2974 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2975 MDIC_REGADD(reg) | MDIC_DATA(val));
2976
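	/* Poll (up to ~1ms) for the MDI cycle to complete. */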
2977 for (i = 0; i < 100; i++) {
2978 mdic = CSR_READ(sc, WMREG_MDIC);
2979 if (mdic & MDIC_READY)
2980 break;
2981 delay(10);
2982 }
2983
2984 if ((mdic & MDIC_READY) == 0)
2985 printf("%s: MDIC write timed out: phy %d reg %d\n",
2986 sc->sc_dev.dv_xname, phy, reg);
2987 else if (mdic & MDIC_E)
2988 printf("%s: MDIC write error: phy %d reg %d\n",
2989 sc->sc_dev.dv_xname, phy, reg);
2990 }
2991
2992 /*
2993 * wm_gmii_statchg: [mii interface function]
2994 *
2995 * Callback from MII layer when media changes.
2996 */
2997 void
2998 wm_gmii_statchg(struct device *self)
2999 {
3000 struct wm_softc *sc = (void *) self;
3001
3002 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3003
3004 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3005 DPRINTF(WM_DEBUG_LINK,
3006 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3007 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3008 } else {
3009 DPRINTF(WM_DEBUG_LINK,
3010 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3011 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3012 }
3013
3014 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3015 }
3016