1 /* $NetBSD: if_wm.c,v 1.38 2003/07/14 15:47:25 lukem Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Make GMII work on the i82543.
44 *
45 * - Fix hw VLAN assist.
46 *
47 * - Jumbo frames -- requires changes to network stack due to
48 * lame buffer length handling on chip.
49 */
50
51 #include <sys/cdefs.h>
52 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.38 2003/07/14 15:47:25 lukem Exp $");
53
54 #include "bpfilter.h"
55 #include "rnd.h"
56
57 #include <sys/param.h>
58 #include <sys/systm.h>
59 #include <sys/callout.h>
60 #include <sys/mbuf.h>
61 #include <sys/malloc.h>
62 #include <sys/kernel.h>
63 #include <sys/socket.h>
64 #include <sys/ioctl.h>
65 #include <sys/errno.h>
66 #include <sys/device.h>
67 #include <sys/queue.h>
68
69 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
70
71 #if NRND > 0
72 #include <sys/rnd.h>
73 #endif
74
75 #include <net/if.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/if_ether.h>
79
80 #if NBPFILTER > 0
81 #include <net/bpf.h>
82 #endif
83
84 #include <netinet/in.h> /* XXX for struct ip */
85 #include <netinet/in_systm.h> /* XXX for struct ip */
86 #include <netinet/ip.h> /* XXX for struct ip */
87 #include <netinet/tcp.h> /* XXX for struct tcphdr */
88
89 #include <machine/bus.h>
90 #include <machine/intr.h>
91 #include <machine/endian.h>
92
93 #include <dev/mii/mii.h>
94 #include <dev/mii/miivar.h>
95 #include <dev/mii/mii_bitbang.h>
96
97 #include <dev/pci/pcireg.h>
98 #include <dev/pci/pcivar.h>
99 #include <dev/pci/pcidevs.h>
100
101 #include <dev/pci/if_wmreg.h>
102
103 #ifdef WM_DEBUG
104 #define WM_DEBUG_LINK 0x01
105 #define WM_DEBUG_TX 0x02
106 #define WM_DEBUG_RX 0x04
107 #define WM_DEBUG_GMII 0x08
108 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
109
110 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
111 #else
112 #define DPRINTF(x, y) /* nothing */
113 #endif /* WM_DEBUG */
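
/*
 * Usage sketch (illustrative only, not part of the driver proper): the
 * second argument to DPRINTF() is a fully parenthesized printf() argument
 * list, so the macro expands to a single conditional printf call when the
 * selected debug class is enabled, and to nothing in non-WM_DEBUG kernels:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: loaded %d segments\n",
 *	    sc->sc_dev.dv_xname, dmamap->dm_nsegs));
 */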
114
115 /*
116 * Transmit descriptor list size. Due to errata, we can only have
117 * 256 hardware descriptors in the ring. We tell the upper layers
118 * that they can queue a lot of packets, and we go ahead and manage
119 * up to 64 of them at a time. We allow up to 16 DMA segments per
120 * packet.
121 */
122 #define WM_NTXSEGS 16
123 #define WM_IFQUEUELEN 256
124 #define WM_TXQUEUELEN 64
125 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
126 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
127 #define WM_NTXDESC 256
128 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
129 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
130 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
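
/*
 * A worked example of the mask arithmetic above (illustrative only):
 * because both ring sizes are powers of two, "advance and wrap" is a
 * single AND.  With WM_NTXDESC == 256, WM_NEXTTX(255) == (255 + 1) & 0xff
 * == 0, so the index wraps to the start of the ring without a branch.
 */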
131
132 /*
133 * Receive descriptor list size. We have one Rx buffer for normal
134 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
135 * packet. We allocate 256 receive descriptors, each with a 2k
136 * buffer (MCLBYTES), which gives us room for about 50 jumbo packets.
137 */
138 #define WM_NRXDESC 256
139 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
140 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
141 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
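
/*
 * The arithmetic behind the comment above (illustrative only): a maximal
 * jumbo frame is ETHER_MAX_LEN_JUMBO (9018) bytes, which at 2K (MCLBYTES)
 * per buffer needs ceil(9018 / 2048) == 5 descriptors, so a 256-entry
 * ring holds 256 / 5 == 51 such packets -- the "about 50" figure above.
 */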
142
143 /*
144 * Control structures are DMA'd to the i82542 chip. We allocate them in
145 * a single clump that maps to a single DMA segment to make several things
146 * easier.
147 */
148 struct wm_control_data {
149 /*
150 * The transmit descriptors.
151 */
152 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
153
154 /*
155 * The receive descriptors.
156 */
157 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
158 };
159
160 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
161 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
162 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
163
164 /*
165 * Software state for transmit jobs.
166 */
167 struct wm_txsoft {
168 struct mbuf *txs_mbuf; /* head of our mbuf chain */
169 bus_dmamap_t txs_dmamap; /* our DMA map */
170 int txs_firstdesc; /* first descriptor in packet */
171 int txs_lastdesc; /* last descriptor in packet */
172 int txs_ndesc; /* # of descriptors used */
173 };
174
175 /*
176 * Software state for receive buffers. Each descriptor gets a
177 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
178 * more than one buffer, we chain them together.
179 */
180 struct wm_rxsoft {
181 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
182 bus_dmamap_t rxs_dmamap; /* our DMA map */
183 };
184
185 /*
186 * Software state per device.
187 */
188 struct wm_softc {
189 struct device sc_dev; /* generic device information */
190 bus_space_tag_t sc_st; /* bus space tag */
191 bus_space_handle_t sc_sh; /* bus space handle */
192 bus_dma_tag_t sc_dmat; /* bus DMA tag */
193 struct ethercom sc_ethercom; /* ethernet common data */
194 void *sc_sdhook; /* shutdown hook */
195
196 int sc_type; /* chip type; see below */
197 int sc_flags; /* flags; see below */
198
199 void *sc_ih; /* interrupt cookie */
200
201 struct mii_data sc_mii; /* MII/media information */
202
203 struct callout sc_tick_ch; /* tick callout */
204
205 bus_dmamap_t sc_cddmamap; /* control data DMA map */
206 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
207
208 /*
209 * Software state for the transmit and receive descriptors.
210 */
211 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
212 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
213
214 /*
215 * Control data structures.
216 */
217 struct wm_control_data *sc_control_data;
218 #define sc_txdescs sc_control_data->wcd_txdescs
219 #define sc_rxdescs sc_control_data->wcd_rxdescs
220
221 #ifdef WM_EVENT_COUNTERS
222 /* Event counters. */
223 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
224 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
225 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
226 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
227 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
228 struct evcnt sc_ev_rxintr; /* Rx interrupts */
229 struct evcnt sc_ev_linkintr; /* Link interrupts */
230
231 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
232 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
233 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
234 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
235
236 struct evcnt sc_ev_txctx_init; /* Tx cksum context cache initialized */
237 struct evcnt sc_ev_txctx_hit; /* Tx cksum context cache hit */
238 struct evcnt sc_ev_txctx_miss; /* Tx cksum context cache miss */
239
240 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
241 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
242
243 struct evcnt sc_ev_tu; /* Tx underrun */
244 #endif /* WM_EVENT_COUNTERS */
245
246 bus_addr_t sc_tdt_reg; /* offset of TDT register */
247
248 int sc_txfree; /* number of free Tx descriptors */
249 int sc_txnext; /* next ready Tx descriptor */
250
251 int sc_txsfree; /* number of free Tx jobs */
252 int sc_txsnext; /* next free Tx job */
253 int sc_txsdirty; /* dirty Tx jobs */
254
255 uint32_t sc_txctx_ipcs; /* cached Tx IP cksum ctx */
256 uint32_t sc_txctx_tucs; /* cached Tx TCP/UDP cksum ctx */
257
258 bus_addr_t sc_rdt_reg; /* offset of RDT register */
259
260 int sc_rxptr; /* next ready Rx descriptor/queue ent */
261 int sc_rxdiscard;
262 int sc_rxlen;
263 struct mbuf *sc_rxhead;
264 struct mbuf *sc_rxtail;
265 struct mbuf **sc_rxtailp;
266
267 uint32_t sc_ctrl; /* prototype CTRL register */
268 #if 0
269 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
270 #endif
271 uint32_t sc_icr; /* prototype interrupt bits */
272 uint32_t sc_tctl; /* prototype TCTL register */
273 uint32_t sc_rctl; /* prototype RCTL register */
274 uint32_t sc_txcw; /* prototype TXCW register */
275 uint32_t sc_tipg; /* prototype TIPG register */
276
277 int sc_tbi_linkup; /* TBI link status */
278 int sc_tbi_anstate; /* autonegotiation state */
279
280 int sc_mchash_type; /* multicast filter offset */
281
282 #if NRND > 0
283 rndsource_element_t rnd_source; /* random source */
284 #endif
285 };
286
287 #define WM_RXCHAIN_RESET(sc) \
288 do { \
289 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
290 *(sc)->sc_rxtailp = NULL; \
291 (sc)->sc_rxlen = 0; \
292 } while (/*CONSTCOND*/0)
293
294 #define WM_RXCHAIN_LINK(sc, m) \
295 do { \
296 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
297 (sc)->sc_rxtailp = &(m)->m_next; \
298 } while (/*CONSTCOND*/0)
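
/*
 * A minimal sketch of the tail-pointer idiom above (hypothetical mbufs,
 * not driver state): sc_rxtailp always points at the m_next slot where
 * the next fragment belongs, so appending is O(1) and no list walk is
 * needed when a jumbo packet spans several Rx buffers:
 *
 *	WM_RXCHAIN_RESET(sc);		// rxhead == NULL, tailp == &rxhead
 *	WM_RXCHAIN_LINK(sc, m1);	// rxhead == m1, tailp == &m1->m_next
 *	WM_RXCHAIN_LINK(sc, m2);	// m1->m_next == m2, rxtail == m2
 */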
299
300 /* sc_type */
301 #define WM_T_82542_2_0 0 /* i82542 2.0 (really old) */
302 #define WM_T_82542_2_1 1 /* i82542 2.1+ (old) */
303 #define WM_T_82543 2 /* i82543 */
304 #define WM_T_82544 3 /* i82544 */
305 #define WM_T_82540 4 /* i82540 */
306 #define WM_T_82545 5 /* i82545 */
307 #define WM_T_82546 6 /* i82546 */
308
309 /* sc_flags */
310 #define WM_F_HAS_MII 0x01 /* has MII */
311 #define WM_F_EEPROM_HANDSHAKE 0x02 /* requires EEPROM handshake */
312
313 #ifdef WM_EVENT_COUNTERS
314 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
315 #else
316 #define WM_EVCNT_INCR(ev) /* nothing */
317 #endif
318
319 #define CSR_READ(sc, reg) \
320 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
321 #define CSR_WRITE(sc, reg, val) \
322 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
323
324 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
325 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
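
/*
 * Worked example (illustrative only): since the control data lives in one
 * physically contiguous DMA segment, the device-visible address of any
 * descriptor is just the segment base plus its offsetof().  For Tx
 * descriptor 10:
 *
 *	WM_CDTXADDR(sc, 10)
 *	    == sc->sc_cddma + offsetof(struct wm_control_data,
 *		   wcd_txdescs[10])
 *	    == sc->sc_cddma + 10 * sizeof(wiseman_txdesc_t)
 */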
326
327 #define WM_CDTXSYNC(sc, x, n, ops) \
328 do { \
329 int __x, __n; \
330 \
331 __x = (x); \
332 __n = (n); \
333 \
334 /* If it will wrap around, sync to the end of the ring. */ \
335 if ((__x + __n) > WM_NTXDESC) { \
336 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
337 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
338 (WM_NTXDESC - __x), (ops)); \
339 __n -= (WM_NTXDESC - __x); \
340 __x = 0; \
341 } \
342 \
343 /* Now sync whatever is left. */ \
344 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
345 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
346 } while (/*CONSTCOND*/0)
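
/*
 * Example of the wrap-around split above (illustrative only): syncing 4
 * descriptors starting at index 254 with WM_NTXDESC == 256 becomes two
 * bus_dmamap_sync() calls, one covering [254..255] and one covering
 * [0..1], because a single call can only name a contiguous byte range.
 */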
347
348 #define WM_CDRXSYNC(sc, x, ops) \
349 do { \
350 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
351 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
352 } while (/*CONSTCOND*/0)
353
354 #define WM_INIT_RXDESC(sc, x) \
355 do { \
356 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
357 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
358 struct mbuf *__m = __rxs->rxs_mbuf; \
359 \
360 /* \
361 * Note: We scoot the packet forward 2 bytes in the buffer \
362 * so that the payload after the Ethernet header is aligned \
363 * to a 4-byte boundary. \
364 * \
365 * XXX BRAINDAMAGE ALERT! \
366 * The stupid chip uses the same size for every buffer, which \
367 * is set in the Receive Control register. We are using the 2K \
368 * size option, but what we REALLY want is (2K - 2)! For this \
369 * reason, we can't accept packets longer than the standard \
370 * Ethernet MTU, without incurring a big penalty to copy every \
371 * incoming packet to a new, suitably aligned buffer. \
372 * \
373 * We'll need to make some changes to the layer 3/4 parts of \
374 * the stack (to copy the headers to a new buffer if not \
375 * aligned) in order to support large MTU on this chip. Lame. \
376 */ \
377 __m->m_data = __m->m_ext.ext_buf + 2; \
378 \
379 __rxd->wrx_addr.wa_low = \
380 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
381 __rxd->wrx_addr.wa_high = 0; \
382 __rxd->wrx_len = 0; \
383 __rxd->wrx_cksum = 0; \
384 __rxd->wrx_status = 0; \
385 __rxd->wrx_errors = 0; \
386 __rxd->wrx_special = 0; \
387 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
388 \
389 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
390 } while (/*CONSTCOND*/0)
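
/*
 * The alignment arithmetic above, worked out (illustrative only): the
 * Ethernet header is ETHER_HDR_LEN == 14 bytes, so starting the frame at
 * ext_buf + 2 places the IP header at offset 16 within the buffer, a
 * 4-byte boundary, which keeps strict-alignment CPUs happy.
 */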
391
392 void wm_start(struct ifnet *);
393 void wm_watchdog(struct ifnet *);
394 int wm_ioctl(struct ifnet *, u_long, caddr_t);
395 int wm_init(struct ifnet *);
396 void wm_stop(struct ifnet *, int);
397
398 void wm_shutdown(void *);
399
400 void wm_reset(struct wm_softc *);
401 void wm_rxdrain(struct wm_softc *);
402 int wm_add_rxbuf(struct wm_softc *, int);
403 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
404 void wm_tick(void *);
405
406 void wm_set_filter(struct wm_softc *);
407
408 int wm_intr(void *);
409 void wm_txintr(struct wm_softc *);
410 void wm_rxintr(struct wm_softc *);
411 void wm_linkintr(struct wm_softc *, uint32_t);
412
413 void wm_tbi_mediainit(struct wm_softc *);
414 int wm_tbi_mediachange(struct ifnet *);
415 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
416
417 void wm_tbi_set_linkled(struct wm_softc *);
418 void wm_tbi_check_link(struct wm_softc *);
419
420 void wm_gmii_reset(struct wm_softc *);
421
422 int wm_gmii_i82543_readreg(struct device *, int, int);
423 void wm_gmii_i82543_writereg(struct device *, int, int, int);
424
425 int wm_gmii_i82544_readreg(struct device *, int, int);
426 void wm_gmii_i82544_writereg(struct device *, int, int, int);
427
428 void wm_gmii_statchg(struct device *);
429
430 void wm_gmii_mediainit(struct wm_softc *);
431 int wm_gmii_mediachange(struct ifnet *);
432 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
433
434 int wm_match(struct device *, struct cfdata *, void *);
435 void wm_attach(struct device *, struct device *, void *);
436
437 CFATTACH_DECL(wm, sizeof(struct wm_softc),
438 wm_match, wm_attach, NULL, NULL);
439
440 /*
441 * Devices supported by this driver.
442 */
443 const struct wm_product {
444 pci_vendor_id_t wmp_vendor;
445 pci_product_id_t wmp_product;
446 const char *wmp_name;
447 int wmp_type;
448 int wmp_flags;
449 #define WMP_F_1000X 0x01
450 #define WMP_F_1000T 0x02
451 } wm_products[] = {
452 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
453 "Intel i82542 1000BASE-X Ethernet",
454 WM_T_82542_2_1, WMP_F_1000X },
455
456 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
457 "Intel i82543GC 1000BASE-X Ethernet",
458 WM_T_82543, WMP_F_1000X },
459
460 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
461 "Intel i82543GC 1000BASE-T Ethernet",
462 WM_T_82543, WMP_F_1000T },
463
464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
465 "Intel i82544EI 1000BASE-T Ethernet",
466 WM_T_82544, WMP_F_1000T },
467
468 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
469 "Intel i82544EI 1000BASE-X Ethernet",
470 WM_T_82544, WMP_F_1000X },
471
472 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
473 "Intel i82544GC 1000BASE-T Ethernet",
474 WM_T_82544, WMP_F_1000T },
475
476 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
477 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
478 WM_T_82544, WMP_F_1000T },
479
480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
481 "Intel i82540EM 1000BASE-T Ethernet",
482 WM_T_82540, WMP_F_1000T },
483
484 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
485 "Intel i82540EP 1000BASE-T Ethernet",
486 WM_T_82540, WMP_F_1000T },
487
488 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
489 "Intel i82540EP 1000BASE-T Ethernet",
490 WM_T_82540, WMP_F_1000T },
491
492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
493 "Intel i82540EP 1000BASE-T Ethernet",
494 WM_T_82540, WMP_F_1000T },
495
496 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
497 "Intel i82545EM 1000BASE-T Ethernet",
498 WM_T_82545, WMP_F_1000T },
499
500 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
501 "Intel i82546EB 1000BASE-T Ethernet",
502 WM_T_82546, WMP_F_1000T },
503
504 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
505 "Intel i82545EM 1000BASE-X Ethernet",
506 WM_T_82545, WMP_F_1000X },
507
508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
509 "Intel i82546EB 1000BASE-X Ethernet",
510 WM_T_82546, WMP_F_1000X },
511
512 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
513 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
514 WM_T_82540, WMP_F_1000T },
515
516 { 0, 0,
517 NULL,
518 0, 0 },
519 };
520
521 #ifdef WM_EVENT_COUNTERS
522 #if WM_NTXSEGS != 16
523 #error Update wm_txseg_evcnt_names
524 #endif
525 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
526 "txseg1",
527 "txseg2",
528 "txseg3",
529 "txseg4",
530 "txseg5",
531 "txseg6",
532 "txseg7",
533 "txseg8",
534 "txseg9",
535 "txseg10",
536 "txseg11",
537 "txseg12",
538 "txseg13",
539 "txseg14",
540 "txseg15",
541 "txseg16",
542 };
543 #endif /* WM_EVENT_COUNTERS */
544
545 static const struct wm_product *
546 wm_lookup(const struct pci_attach_args *pa)
547 {
548 const struct wm_product *wmp;
549
550 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
551 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
552 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
553 return (wmp);
554 }
555 return (NULL);
556 }
557
558 int
559 wm_match(struct device *parent, struct cfdata *cf, void *aux)
560 {
561 struct pci_attach_args *pa = aux;
562
563 if (wm_lookup(pa) != NULL)
564 return (1);
565
566 return (0);
567 }
568
569 void
570 wm_attach(struct device *parent, struct device *self, void *aux)
571 {
572 struct wm_softc *sc = (void *) self;
573 struct pci_attach_args *pa = aux;
574 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
575 pci_chipset_tag_t pc = pa->pa_pc;
576 pci_intr_handle_t ih;
577 const char *intrstr = NULL;
578 bus_space_tag_t memt;
579 bus_space_handle_t memh;
580 bus_dma_segment_t seg;
581 int memh_valid;
582 int i, rseg, error;
583 const struct wm_product *wmp;
584 uint8_t enaddr[ETHER_ADDR_LEN];
585 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
586 pcireg_t preg, memtype;
587 int pmreg;
588
589 callout_init(&sc->sc_tick_ch);
590
591 wmp = wm_lookup(pa);
592 if (wmp == NULL) {
593 printf("\n");
594 panic("wm_attach: impossible");
595 }
596
597 sc->sc_dmat = pa->pa_dmat;
598
599 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
600 aprint_naive(": Ethernet controller\n");
601 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
602
603 sc->sc_type = wmp->wmp_type;
604 if (sc->sc_type < WM_T_82543) {
605 if (preg < 2) {
606 aprint_error("%s: i82542 must be at least rev. 2\n",
607 sc->sc_dev.dv_xname);
608 return;
609 }
610 if (preg < 3)
611 sc->sc_type = WM_T_82542_2_0;
612 }
613
614 /*
615 * Some chips require a handshake to access the EEPROM.
616 */
617 if (sc->sc_type >= WM_T_82540)
618 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
619
620 /*
621 * Map the device.
622 */
623 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
624 switch (memtype) {
625 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
626 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
627 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
628 memtype, 0, &memt, &memh, NULL, NULL) == 0);
629 break;
630 default:
631 memh_valid = 0;
632 }
633
634 if (memh_valid) {
635 sc->sc_st = memt;
636 sc->sc_sh = memh;
637 } else {
638 aprint_error("%s: unable to map device registers\n",
639 sc->sc_dev.dv_xname);
640 return;
641 }
642
643 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
644 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
645 preg |= PCI_COMMAND_MASTER_ENABLE;
646 if (sc->sc_type < WM_T_82542_2_1)
647 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
648 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
649
650 /* Get it out of power save mode, if needed. */
651 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
652 preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
653 PCI_PMCSR_STATE_MASK;
654 if (preg == PCI_PMCSR_STATE_D3) {
655 /*
656 * The card has lost all configuration data in
657 * this state, so punt.
658 */
659 aprint_error("%s: unable to wake from power state D3\n",
660 sc->sc_dev.dv_xname);
661 return;
662 }
663 if (preg != PCI_PMCSR_STATE_D0) {
664 aprint_normal("%s: waking up from power state D%d\n",
665 sc->sc_dev.dv_xname, preg);
666 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
667 PCI_PMCSR_STATE_D0);
668 }
669 }
670
671 /*
672 * Map and establish our interrupt.
673 */
674 if (pci_intr_map(pa, &ih)) {
675 aprint_error("%s: unable to map interrupt\n",
676 sc->sc_dev.dv_xname);
677 return;
678 }
679 intrstr = pci_intr_string(pc, ih);
680 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
681 if (sc->sc_ih == NULL) {
682 aprint_error("%s: unable to establish interrupt",
683 sc->sc_dev.dv_xname);
684 if (intrstr != NULL)
685 aprint_normal(" at %s", intrstr);
686 aprint_normal("\n");
687 return;
688 }
689 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
690
691 /*
692 * Allocate the control data structures, and create and load the
693 * DMA map for it.
694 */
695 if ((error = bus_dmamem_alloc(sc->sc_dmat,
696 sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
697 0)) != 0) {
698 aprint_error(
699 "%s: unable to allocate control data, error = %d\n",
700 sc->sc_dev.dv_xname, error);
701 goto fail_0;
702 }
703
704 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
705 sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
706 0)) != 0) {
707 aprint_error("%s: unable to map control data, error = %d\n",
708 sc->sc_dev.dv_xname, error);
709 goto fail_1;
710 }
711
712 if ((error = bus_dmamap_create(sc->sc_dmat,
713 sizeof(struct wm_control_data), 1,
714 sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
715 aprint_error("%s: unable to create control data DMA map, "
716 "error = %d\n", sc->sc_dev.dv_xname, error);
717 goto fail_2;
718 }
719
720 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
721 sc->sc_control_data, sizeof(struct wm_control_data), NULL,
722 0)) != 0) {
723 aprint_error(
724 "%s: unable to load control data DMA map, error = %d\n",
725 sc->sc_dev.dv_xname, error);
726 goto fail_3;
727 }
728
729 /*
730 * Create the transmit buffer DMA maps.
731 */
732 for (i = 0; i < WM_TXQUEUELEN; i++) {
733 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
734 WM_NTXSEGS, MCLBYTES, 0, 0,
735 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
736 aprint_error("%s: unable to create Tx DMA map %d, "
737 "error = %d\n", sc->sc_dev.dv_xname, i, error);
738 goto fail_4;
739 }
740 }
741
742 /*
743 * Create the receive buffer DMA maps.
744 */
745 for (i = 0; i < WM_NRXDESC; i++) {
746 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
747 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
748 aprint_error("%s: unable to create Rx DMA map %d, "
749 "error = %d\n", sc->sc_dev.dv_xname, i, error);
750 goto fail_5;
751 }
752 sc->sc_rxsoft[i].rxs_mbuf = NULL;
753 }
754
755 /*
756 * Reset the chip to a known state.
757 */
758 wm_reset(sc);
759
760 /*
761 * Read the Ethernet address from the EEPROM.
762 */
763 wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
764 sizeof(myea) / sizeof(myea[0]), myea);
765 enaddr[0] = myea[0] & 0xff;
766 enaddr[1] = myea[0] >> 8;
767 enaddr[2] = myea[1] & 0xff;
768 enaddr[3] = myea[1] >> 8;
769 enaddr[4] = myea[2] & 0xff;
770 enaddr[5] = myea[2] >> 8;
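
/*
 * For example (hypothetical EEPROM contents): myea[0] == 0x2211 unpacks
 * to enaddr[0] == 0x11 and enaddr[1] == 0x22 -- the EEPROM stores the
 * station address as three little-endian 16-bit words.
 */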
771
772 /*
773 * Toggle the LSB of the MAC address on the second port
774 * of the i82546.
775 */
776 if (sc->sc_type == WM_T_82546) {
777 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
778 enaddr[5] ^= 1;
779 }
780
781 aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
782 ether_sprintf(enaddr));
783
784 /*
785 * Read the config info from the EEPROM, and set up various
786 * bits in the control registers based on their contents.
787 */
788 wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
789 wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
790 if (sc->sc_type >= WM_T_82544)
791 wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);
792
793 if (cfg1 & EEPROM_CFG1_ILOS)
794 sc->sc_ctrl |= CTRL_ILOS;
795 if (sc->sc_type >= WM_T_82544) {
796 sc->sc_ctrl |=
797 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
798 CTRL_SWDPIO_SHIFT;
799 sc->sc_ctrl |=
800 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
801 CTRL_SWDPINS_SHIFT;
802 } else {
803 sc->sc_ctrl |=
804 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
805 CTRL_SWDPIO_SHIFT;
806 }
807
808 #if 0
809 if (sc->sc_type >= WM_T_82544) {
810 if (cfg1 & EEPROM_CFG1_IPS0)
811 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
812 if (cfg1 & EEPROM_CFG1_IPS1)
813 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
814 sc->sc_ctrl_ext |=
815 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
816 CTRL_EXT_SWDPIO_SHIFT;
817 sc->sc_ctrl_ext |=
818 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
819 CTRL_EXT_SWDPINS_SHIFT;
820 } else {
821 sc->sc_ctrl_ext |=
822 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
823 CTRL_EXT_SWDPIO_SHIFT;
824 }
825 #endif
826
827 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
828 #if 0
829 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
830 #endif
831
832 /*
833 * Set up some register offsets that are different between
834 * the i82542 and the i82543 and later chips.
835 */
836 if (sc->sc_type < WM_T_82543) {
837 sc->sc_rdt_reg = WMREG_OLD_RDT0;
838 sc->sc_tdt_reg = WMREG_OLD_TDT;
839 } else {
840 sc->sc_rdt_reg = WMREG_RDT;
841 sc->sc_tdt_reg = WMREG_TDT;
842 }
843
844 /*
845 * Determine if we should use flow control. We should
846 * always use it, unless we're on an i82542 < 2.1.
847 */
848 if (sc->sc_type >= WM_T_82542_2_1)
849 sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
850
851 /*
852 * Determine if we're TBI or GMII mode, and initialize the
853 * media structures accordingly.
854 */
855 if (sc->sc_type < WM_T_82543 ||
856 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
857 if (wmp->wmp_flags & WMP_F_1000T)
858 aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
859 "product!\n", sc->sc_dev.dv_xname);
860 wm_tbi_mediainit(sc);
861 } else {
862 if (wmp->wmp_flags & WMP_F_1000X)
863 aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
864 "product!\n", sc->sc_dev.dv_xname);
865 wm_gmii_mediainit(sc);
866 }
867
868 ifp = &sc->sc_ethercom.ec_if;
869 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
870 ifp->if_softc = sc;
871 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
872 ifp->if_ioctl = wm_ioctl;
873 ifp->if_start = wm_start;
874 ifp->if_watchdog = wm_watchdog;
875 ifp->if_init = wm_init;
876 ifp->if_stop = wm_stop;
877 IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
878 IFQ_SET_READY(&ifp->if_snd);
879
880 /*
881 * If we're an i82543 or greater, we can support VLANs.
882 */
883 if (sc->sc_type >= WM_T_82543)
884 sc->sc_ethercom.ec_capabilities |=
885 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
886
887 /*
888 * We can perform IPv4, TCPv4, and UDPv4 checksums in hardware.
889 * Only on i82543 and later.
890 */
891 if (sc->sc_type >= WM_T_82543)
892 ifp->if_capabilities |=
893 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
894
895 /*
896 * Attach the interface.
897 */
898 if_attach(ifp);
899 ether_ifattach(ifp, enaddr);
900 #if NRND > 0
901 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
902 RND_TYPE_NET, 0);
903 #endif
904
905 #ifdef WM_EVENT_COUNTERS
906 /* Attach event counters. */
907 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
908 NULL, sc->sc_dev.dv_xname, "txsstall");
909 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
910 NULL, sc->sc_dev.dv_xname, "txdstall");
911 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
912 NULL, sc->sc_dev.dv_xname, "txforceintr");
913 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
914 NULL, sc->sc_dev.dv_xname, "txdw");
915 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
916 NULL, sc->sc_dev.dv_xname, "txqe");
917 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
918 NULL, sc->sc_dev.dv_xname, "rxintr");
919 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
920 NULL, sc->sc_dev.dv_xname, "linkintr");
921
922 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
923 NULL, sc->sc_dev.dv_xname, "rxipsum");
924 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
925 NULL, sc->sc_dev.dv_xname, "rxtusum");
926 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
927 NULL, sc->sc_dev.dv_xname, "txipsum");
928 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
929 NULL, sc->sc_dev.dv_xname, "txtusum");
930
931 evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
932 NULL, sc->sc_dev.dv_xname, "txctx init");
933 evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
934 NULL, sc->sc_dev.dv_xname, "txctx hit");
935 evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
936 NULL, sc->sc_dev.dv_xname, "txctx miss");
937
938 for (i = 0; i < WM_NTXSEGS; i++)
939 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
940 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
941
942 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
943 NULL, sc->sc_dev.dv_xname, "txdrop");
944
945 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
946 NULL, sc->sc_dev.dv_xname, "tu");
947 #endif /* WM_EVENT_COUNTERS */
948
949 /*
950 * Make sure the interface is shutdown during reboot.
951 */
952 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
953 if (sc->sc_sdhook == NULL)
954 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
955 sc->sc_dev.dv_xname);
956 return;
957
958 /*
959 * Free any resources we've allocated during the failed attach
960 * attempt. Do this in reverse order and fall through.
961 */
962 fail_5:
963 for (i = 0; i < WM_NRXDESC; i++) {
964 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
965 bus_dmamap_destroy(sc->sc_dmat,
966 sc->sc_rxsoft[i].rxs_dmamap);
967 }
968 fail_4:
969 for (i = 0; i < WM_TXQUEUELEN; i++) {
970 if (sc->sc_txsoft[i].txs_dmamap != NULL)
971 bus_dmamap_destroy(sc->sc_dmat,
972 sc->sc_txsoft[i].txs_dmamap);
973 }
974 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
975 fail_3:
976 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
977 fail_2:
978 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
979 sizeof(struct wm_control_data));
980 fail_1:
981 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
982 fail_0:
983 return;
984 }
985
986 /*
987 * wm_shutdown:
988 *
989 * Make sure the interface is stopped at reboot time.
990 */
991 void
992 wm_shutdown(void *arg)
993 {
994 struct wm_softc *sc = arg;
995
996 wm_stop(&sc->sc_ethercom.ec_if, 1);
997 }
998
999 /*
1000 * wm_tx_cksum:
1001 *
1002 * Set up TCP/IP checksumming parameters for the
1003 * specified packet.
1004 */
1005 static int
1006 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1007 uint32_t *fieldsp)
1008 {
1009 struct mbuf *m0 = txs->txs_mbuf;
1010 struct livengood_tcpip_ctxdesc *t;
1011 uint32_t fields = 0, ipcs, tucs;
1012 struct ip *ip;
1013 struct ether_header *eh;
1014 int offset, iphl;
1015
1016 /*
1017 * XXX It would be nice if the mbuf pkthdr had offset
1018 * fields for the protocol headers.
1019 */
1020
1021 eh = mtod(m0, struct ether_header *);
1022 switch (ntohs(eh->ether_type)) {
1023 case ETHERTYPE_IP:
1024 iphl = sizeof(struct ip);
1025 offset = ETHER_HDR_LEN;
1026 break;
1027
1028 case ETHERTYPE_VLAN:
1029 iphl = sizeof(struct ip);
1030 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1031 break;
1032
1033 default:
1034 /*
1035 * Don't support this protocol or encapsulation.
1036 */
1037 *fieldsp = 0;
1038 *cmdp = 0;
1039 return (0);
1040 }
1041
1042 if (m0->m_len < (offset + iphl)) {
1043 if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
1044 printf("%s: wm_tx_cksum: mbuf allocation failed, "
1045 "packet dropped\n", sc->sc_dev.dv_xname);
1046 return (ENOMEM);
1047 }
1048 m0 = txs->txs_mbuf;
1049 }
1050
1051 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
1052 iphl = ip->ip_hl << 2;
1053
1054 /*
1055 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1056 * offload feature, if we load the context descriptor, we
1057 * MUST provide valid values for IPCSS and TUCSS fields.
1058 */
1059
1060 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1061 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1062 fields |= htole32(WTX_IXSM);
1063 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1064 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1065 WTX_TCPIP_IPCSE(offset + iphl - 1));
1066 } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1067 /* Use the cached value. */
1068 ipcs = sc->sc_txctx_ipcs;
1069 } else {
1070 /* Just initialize it to the likely value anyway. */
1071 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1072 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1073 WTX_TCPIP_IPCSE(offset + iphl - 1));
1074 }
1075
1076 offset += iphl;
1077
1078 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1079 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1080 fields |= htole32(WTX_TXSM);
1081 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1082 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1083 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1084 } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1085 /* Use the cached value. */
1086 tucs = sc->sc_txctx_tucs;
1087 } else {
1088 /* Just initialize it to a valid TCP context. */
1089 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1090 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1091 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1092 }
1093
1094 if (sc->sc_txctx_ipcs == ipcs &&
1095 sc->sc_txctx_tucs == tucs) {
1096 /* Cached context is fine. */
1097 WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1098 } else {
1099 /* Fill in the context descriptor. */
1100 #ifdef WM_EVENT_COUNTERS
1101 if (sc->sc_txctx_ipcs == 0xffffffff &&
1102 sc->sc_txctx_tucs == 0xffffffff)
1103 WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1104 else
1105 WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1106 #endif
1107 t = (struct livengood_tcpip_ctxdesc *)
1108 &sc->sc_txdescs[sc->sc_txnext];
1109 t->tcpip_ipcs = ipcs;
1110 t->tcpip_tucs = tucs;
1111 t->tcpip_cmdlen =
1112 htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1113 t->tcpip_seg = 0;
1114 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1115
1116 sc->sc_txctx_ipcs = ipcs;
1117 sc->sc_txctx_tucs = tucs;
1118
1119 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1120 txs->txs_ndesc++;
1121 }
1122
1123 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1124 *fieldsp = fields;
1125
1126 return (0);
1127 }
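
/*
 * Worked example of the context-descriptor fields (illustrative only):
 * for an untagged IPv4/TCP frame, offset starts at ETHER_HDR_LEN == 14,
 * so IPCSS == 14, IPCSO == 14 + offsetof(struct ip, ip_sum) == 24, and
 * with a 20-byte IP header IPCSE == 14 + 20 - 1 == 33.  TUCSS then
 * begins at 34, and TUCSO == 34 + offsetof(struct tcphdr, th_sum) == 50.
 */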
1128
1129 /*
1130 * wm_start: [ifnet interface function]
1131 *
1132 * Start packet transmission on the interface.
1133 */
1134 void
1135 wm_start(struct ifnet *ifp)
1136 {
1137 struct wm_softc *sc = ifp->if_softc;
1138 struct mbuf *m0;
1139 #if 0 /* XXXJRT */
1140 struct m_tag *mtag;
1141 #endif
1142 struct wm_txsoft *txs;
1143 bus_dmamap_t dmamap;
1144 int error, nexttx, lasttx, ofree, seg;
1145 uint32_t cksumcmd, cksumfields;
1146
1147 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1148 return;
1149
1150 /*
1151 * Remember the previous number of free descriptors.
1152 */
1153 ofree = sc->sc_txfree;
1154
1155 /*
1156 * Loop through the send queue, setting up transmit descriptors
1157 * until we drain the queue, or use up all available transmit
1158 * descriptors.
1159 */
1160 for (;;) {
1161 /* Grab a packet off the queue. */
1162 IFQ_POLL(&ifp->if_snd, m0);
1163 if (m0 == NULL)
1164 break;
1165
1166 DPRINTF(WM_DEBUG_TX,
1167 ("%s: TX: have packet to transmit: %p\n",
1168 sc->sc_dev.dv_xname, m0));
1169
1170 /* Get a work queue entry. */
1171 if (sc->sc_txsfree < WM_TXQUEUE_GC) {
1172 wm_txintr(sc);
1173 if (sc->sc_txsfree == 0) {
1174 DPRINTF(WM_DEBUG_TX,
1175 ("%s: TX: no free job descriptors\n",
1176 sc->sc_dev.dv_xname));
1177 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1178 break;
1179 }
1180 }
1181
1182 txs = &sc->sc_txsoft[sc->sc_txsnext];
1183 dmamap = txs->txs_dmamap;
1184
1185 /*
1186 * Load the DMA map. If this fails, the packet either
1187 * didn't fit in the allotted number of segments, or we
1188 * were short on resources. For the too-many-segments
1189 * case, we simply report an error and drop the packet,
1190 * since we can't sanely copy a jumbo packet to a single
1191 * buffer.
1192 */
1193 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1194 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1195 if (error) {
1196 if (error == EFBIG) {
1197 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1198 printf("%s: Tx packet consumes too many "
1199 "DMA segments, dropping...\n",
1200 sc->sc_dev.dv_xname);
1201 IFQ_DEQUEUE(&ifp->if_snd, m0);
1202 m_freem(m0);
1203 continue;
1204 }
1205 /*
1206 * Short on resources, just stop for now.
1207 */
1208 DPRINTF(WM_DEBUG_TX,
1209 ("%s: TX: dmamap load failed: %d\n",
1210 sc->sc_dev.dv_xname, error));
1211 break;
1212 }
1213
1214 /*
1215 * Ensure we have enough descriptors free to describe
1216 * the packet. Note, we always reserve one descriptor
1217 * at the end of the ring due to the semantics of the
1218 * TDT register, plus one more in the event we need
1219 * to re-load checksum offload context.
1220 */
1221 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1222 /*
1223 * Not enough free descriptors to transmit this
1224 * packet. We haven't committed anything yet,
1225 * so just unload the DMA map, put the packet
1226 * back on the queue, and punt. Notify the upper
1227 * layer that there are no more slots left.
1228 */
1229 DPRINTF(WM_DEBUG_TX,
1230 ("%s: TX: need %d descriptors, have %d\n",
1231 sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1232 sc->sc_txfree - 1));
1233 ifp->if_flags |= IFF_OACTIVE;
1234 bus_dmamap_unload(sc->sc_dmat, dmamap);
1235 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1236 break;
1237 }
1238
1239 IFQ_DEQUEUE(&ifp->if_snd, m0);
1240
1241 /*
1242 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1243 */
1244
1245 /* Sync the DMA map. */
1246 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1247 BUS_DMASYNC_PREWRITE);
1248
1249 DPRINTF(WM_DEBUG_TX,
1250 ("%s: TX: packet has %d DMA segments\n",
1251 sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1252
1253 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1254
1255 /*
1256 * Store a pointer to the packet so that we can free it
1257 * later.
1258 *
1259 * Initially, we consider the number of descriptors the
1260 * packet uses the number of DMA segments. This may be
1261 * incremented by 1 if we do checksum offload (a descriptor
1262 * is used to set the checksum context).
1263 */
1264 txs->txs_mbuf = m0;
1265 txs->txs_firstdesc = sc->sc_txnext;
1266 txs->txs_ndesc = dmamap->dm_nsegs;
1267
1268 /*
1269 * Set up checksum offload parameters for
1270 * this packet.
1271 */
1272 if (m0->m_pkthdr.csum_flags &
1273 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1274 if (wm_tx_cksum(sc, txs, &cksumcmd,
1275 &cksumfields) != 0) {
1276 /* Error message already displayed. */
1277 bus_dmamap_unload(sc->sc_dmat, dmamap);
1278 continue;
1279 }
1280 } else {
1281 cksumcmd = 0;
1282 cksumfields = 0;
1283 }
1284
1285 cksumcmd |= htole32(WTX_CMD_IDE);
1286
1287 /*
1288 * Initialize the transmit descriptor.
1289 */
1290 for (nexttx = sc->sc_txnext, seg = 0;
1291 seg < dmamap->dm_nsegs;
1292 seg++, nexttx = WM_NEXTTX(nexttx)) {
1293 /*
1294 * Note: we currently only use 32-bit DMA
1295 * addresses.
1296 */
1297 sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
1298 sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1299 htole32(dmamap->dm_segs[seg].ds_addr);
1300 sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1301 htole32(dmamap->dm_segs[seg].ds_len);
1302 sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1303 cksumfields;
1304 lasttx = nexttx;
1305
1306 DPRINTF(WM_DEBUG_TX,
1307 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1308 sc->sc_dev.dv_xname, nexttx,
1309 (uint32_t) dmamap->dm_segs[seg].ds_addr,
1310 (uint32_t) dmamap->dm_segs[seg].ds_len));
1311 }
1312
1313 /*
1314 * Set up the command byte on the last descriptor of
1315 * the packet. If we're in the interrupt delay window,
1316 * delay the interrupt.
1317 */
1318 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1319 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1320
1321 #if 0 /* XXXJRT */
1322 /*
1323 * If VLANs are enabled and the packet has a VLAN tag, set
1324 * up the descriptor to encapsulate the packet for us.
1325 *
1326 * This is only valid on the last descriptor of the packet.
1327 */
1328 if (sc->sc_ethercom.ec_nvlans != 0 &&
1329 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1330 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1331 htole32(WTX_CMD_VLE);
1332 sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1333 = htole16(*(u_int *)(mtag + 1) & 0xffff);
1334 }
1335 #endif /* XXXJRT */
1336
1337 txs->txs_lastdesc = lasttx;
1338
1339 DPRINTF(WM_DEBUG_TX,
1340 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1341 lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1342
1343 /* Sync the descriptors we're using. */
1344 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1345 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1346
1347 /* Give the packet to the chip. */
1348 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1349
1350 DPRINTF(WM_DEBUG_TX,
1351 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1352
1353 DPRINTF(WM_DEBUG_TX,
1354 ("%s: TX: finished transmitting packet, job %d\n",
1355 sc->sc_dev.dv_xname, sc->sc_txsnext));
1356
1357 /* Advance the tx pointer. */
1358 sc->sc_txfree -= txs->txs_ndesc;
1359 sc->sc_txnext = nexttx;
1360
1361 sc->sc_txsfree--;
1362 sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1363
1364 #if NBPFILTER > 0
1365 /* Pass the packet to any BPF listeners. */
1366 if (ifp->if_bpf)
1367 bpf_mtap(ifp->if_bpf, m0);
1368 #endif /* NBPFILTER > 0 */
1369 }
1370
1371 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1372 /* No more slots; notify upper layer. */
1373 ifp->if_flags |= IFF_OACTIVE;
1374 }
1375
1376 if (sc->sc_txfree != ofree) {
1377 /* Set a watchdog timer in case the chip flakes out. */
1378 ifp->if_timer = 5;
1379 }
1380 }
1381
1382 /*
1383 * wm_watchdog: [ifnet interface function]
1384 *
1385 * Watchdog timer handler.
1386 */
1387 void
1388 wm_watchdog(struct ifnet *ifp)
1389 {
1390 struct wm_softc *sc = ifp->if_softc;
1391
1392 /*
1393 * Since we're using delayed interrupts, sweep up
1394 * before we report an error.
1395 */
1396 wm_txintr(sc);
1397
1398 if (sc->sc_txfree != WM_NTXDESC) {
1399 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1400 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1401 sc->sc_txnext);
1402 ifp->if_oerrors++;
1403
1404 /* Reset the interface. */
1405 (void) wm_init(ifp);
1406 }
1407
1408 /* Try to get more packets going. */
1409 wm_start(ifp);
1410 }
1411
1412 /*
1413 * wm_ioctl: [ifnet interface function]
1414 *
1415 * Handle control requests from the operator.
1416 */
1417 int
1418 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1419 {
1420 struct wm_softc *sc = ifp->if_softc;
1421 struct ifreq *ifr = (struct ifreq *) data;
1422 int s, error;
1423
1424 s = splnet();
1425
1426 switch (cmd) {
1427 case SIOCSIFMEDIA:
1428 case SIOCGIFMEDIA:
1429 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1430 break;
1431
1432 default:
1433 error = ether_ioctl(ifp, cmd, data);
1434 if (error == ENETRESET) {
1435 /*
1436 * Multicast list has changed; set the hardware filter
1437 * accordingly.
1438 */
1439 wm_set_filter(sc);
1440 error = 0;
1441 }
1442 break;
1443 }
1444
1445 /* Try to get more packets going. */
1446 wm_start(ifp);
1447
1448 splx(s);
1449 return (error);
1450 }
1451
1452 /*
1453 * wm_intr:
1454 *
1455 * Interrupt service routine.
1456 */
1457 int
1458 wm_intr(void *arg)
1459 {
1460 struct wm_softc *sc = arg;
1461 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1462 uint32_t icr;
1463 int wantinit, handled = 0;
1464
1465 for (wantinit = 0; wantinit == 0;) {
1466 icr = CSR_READ(sc, WMREG_ICR);
1467 if ((icr & sc->sc_icr) == 0)
1468 break;
1469
1470 #if 0 /*NRND > 0*/
1471 if (RND_ENABLED(&sc->rnd_source))
1472 rnd_add_uint32(&sc->rnd_source, icr);
1473 #endif
1474
1475 handled = 1;
1476
1477 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1478 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1479 DPRINTF(WM_DEBUG_RX,
1480 ("%s: RX: got Rx intr 0x%08x\n",
1481 sc->sc_dev.dv_xname,
1482 icr & (ICR_RXDMT0|ICR_RXT0)));
1483 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1484 }
1485 #endif
1486 wm_rxintr(sc);
1487
1488 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1489 if (icr & ICR_TXDW) {
1490 DPRINTF(WM_DEBUG_TX,
1491 ("%s: TX: got TDXW interrupt\n",
1492 sc->sc_dev.dv_xname));
1493 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1494 }
1495 #endif
1496 wm_txintr(sc);
1497
1498 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1499 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1500 wm_linkintr(sc, icr);
1501 }
1502
1503 if (icr & ICR_RXO) {
1504 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1505 wantinit = 1;
1506 }
1507 }
1508
1509 if (handled) {
1510 if (wantinit)
1511 wm_init(ifp);
1512
1513 /* Try to get more packets going. */
1514 wm_start(ifp);
1515 }
1516
1517 return (handled);
1518 }
1519
1520 /*
1521 * wm_txintr:
1522 *
1523 * Helper; handle transmit interrupts.
1524 */
1525 void
1526 wm_txintr(struct wm_softc *sc)
1527 {
1528 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1529 struct wm_txsoft *txs;
1530 uint8_t status;
1531 int i;
1532
1533 ifp->if_flags &= ~IFF_OACTIVE;
1534
1535 /*
1536 * Go through the Tx list and free mbufs for those
1537 * frames which have been transmitted.
1538 */
1539 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1540 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1541 txs = &sc->sc_txsoft[i];
1542
1543 DPRINTF(WM_DEBUG_TX,
1544 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1545
1546 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1547 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1548
1549 status = le32toh(sc->sc_txdescs[
1550 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1551 if ((status & WTX_ST_DD) == 0) {
1552 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1553 BUS_DMASYNC_PREREAD);
1554 break;
1555 }
1556
1557 DPRINTF(WM_DEBUG_TX,
1558 ("%s: TX: job %d done: descs %d..%d\n",
1559 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1560 txs->txs_lastdesc));
1561
1562 /*
1563 * XXX We should probably be using the statistics
1564 * XXX registers, but I don't know if they exist
1565 * XXX on chips before the i82544.
1566 */
1567
1568 #ifdef WM_EVENT_COUNTERS
1569 if (status & WTX_ST_TU)
1570 WM_EVCNT_INCR(&sc->sc_ev_tu);
1571 #endif /* WM_EVENT_COUNTERS */
1572
1573 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1574 ifp->if_oerrors++;
1575 if (status & WTX_ST_LC)
1576 printf("%s: late collision\n",
1577 sc->sc_dev.dv_xname);
1578 else if (status & WTX_ST_EC) {
1579 ifp->if_collisions += 16;
1580 printf("%s: excessive collisions\n",
1581 sc->sc_dev.dv_xname);
1582 }
1583 } else
1584 ifp->if_opackets++;
1585
1586 sc->sc_txfree += txs->txs_ndesc;
1587 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1588 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1589 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1590 m_freem(txs->txs_mbuf);
1591 txs->txs_mbuf = NULL;
1592 }
1593
1594 /* Update the dirty transmit buffer pointer. */
1595 sc->sc_txsdirty = i;
1596 DPRINTF(WM_DEBUG_TX,
1597 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1598
1599 /*
1600 * If there are no more pending transmissions, cancel the watchdog
1601 * timer.
1602 */
1603 if (sc->sc_txsfree == WM_TXQUEUELEN)
1604 ifp->if_timer = 0;
1605 }
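
/*
 * The completion handshake above, in brief (illustrative only): the
 * driver sets WTX_CMD_RS on the last descriptor of each packet, the chip
 * writes WTX_ST_DD back into that descriptor's status byte when the DMA
 * has completed, and the scan stops at the first job whose last
 * descriptor does not yet have DD set.
 */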
1606
1607 /*
1608 * wm_rxintr:
1609 *
1610 * Helper; handle receive interrupts.
1611 */
1612 void
1613 wm_rxintr(struct wm_softc *sc)
1614 {
1615 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1616 struct wm_rxsoft *rxs;
1617 struct mbuf *m;
1618 int i, len;
1619 uint8_t status, errors;
1620
1621 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1622 rxs = &sc->sc_rxsoft[i];
1623
1624 DPRINTF(WM_DEBUG_RX,
1625 ("%s: RX: checking descriptor %d\n",
1626 sc->sc_dev.dv_xname, i));
1627
1628 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1629
1630 status = sc->sc_rxdescs[i].wrx_status;
1631 errors = sc->sc_rxdescs[i].wrx_errors;
1632 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1633
1634 if ((status & WRX_ST_DD) == 0) {
1635 /*
1636 * We have processed all of the receive descriptors.
1637 */
1638 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1639 break;
1640 }
1641
1642 if (__predict_false(sc->sc_rxdiscard)) {
1643 DPRINTF(WM_DEBUG_RX,
1644 ("%s: RX: discarding contents of descriptor %d\n",
1645 sc->sc_dev.dv_xname, i));
1646 WM_INIT_RXDESC(sc, i);
1647 if (status & WRX_ST_EOP) {
1648 /* Reset our state. */
1649 DPRINTF(WM_DEBUG_RX,
1650 ("%s: RX: resetting rxdiscard -> 0\n",
1651 sc->sc_dev.dv_xname));
1652 sc->sc_rxdiscard = 0;
1653 }
1654 continue;
1655 }
1656
1657 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1658 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1659
1660 m = rxs->rxs_mbuf;
1661
1662 /*
1663 * Add a new receive buffer to the ring.
1664 */
1665 if (wm_add_rxbuf(sc, i) != 0) {
1666 /*
1667 * Failed, throw away what we've done so
1668 * far, and discard the rest of the packet.
1669 */
1670 ifp->if_ierrors++;
1671 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1672 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1673 WM_INIT_RXDESC(sc, i);
1674 if ((status & WRX_ST_EOP) == 0)
1675 sc->sc_rxdiscard = 1;
1676 if (sc->sc_rxhead != NULL)
1677 m_freem(sc->sc_rxhead);
1678 WM_RXCHAIN_RESET(sc);
1679 DPRINTF(WM_DEBUG_RX,
1680 ("%s: RX: Rx buffer allocation failed, "
1681 "dropping packet%s\n", sc->sc_dev.dv_xname,
1682 sc->sc_rxdiscard ? " (discard)" : ""));
1683 continue;
1684 }
1685
1686 WM_RXCHAIN_LINK(sc, m);
1687
1688 m->m_len = len;
1689
1690 DPRINTF(WM_DEBUG_RX,
1691 ("%s: RX: buffer at %p len %d\n",
1692 sc->sc_dev.dv_xname, m->m_data, len));
1693
1694 /*
1695 * If this is not the end of the packet, keep
1696 * looking.
1697 */
1698 if ((status & WRX_ST_EOP) == 0) {
1699 sc->sc_rxlen += len;
1700 DPRINTF(WM_DEBUG_RX,
1701 ("%s: RX: not yet EOP, rxlen -> %d\n",
1702 sc->sc_dev.dv_xname, sc->sc_rxlen));
1703 continue;
1704 }
1705
1706 /*
1707 * Okay, we have the entire packet now...
1708 */
1709 *sc->sc_rxtailp = NULL;
1710 m = sc->sc_rxhead;
1711 len += sc->sc_rxlen;
1712
1713 WM_RXCHAIN_RESET(sc);
1714
1715 DPRINTF(WM_DEBUG_RX,
1716 ("%s: RX: have entire packet, len -> %d\n",
1717 sc->sc_dev.dv_xname, len));
1718
1719 /*
1720 * If an error occurred, update stats and drop the packet.
1721 */
1722 if (errors &
1723 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1724 ifp->if_ierrors++;
1725 if (errors & WRX_ER_SE)
1726 printf("%s: symbol error\n",
1727 sc->sc_dev.dv_xname);
1728 else if (errors & WRX_ER_SEQ)
1729 printf("%s: receive sequence error\n",
1730 sc->sc_dev.dv_xname);
1731 else if (errors & WRX_ER_CE)
1732 printf("%s: CRC error\n",
1733 sc->sc_dev.dv_xname);
1734 m_freem(m);
1735 continue;
1736 }
1737
1738 /*
1739 * No errors. Receive the packet.
1740 *
1741 * Note, we have configured the chip to include the
1742 * CRC with every packet.
1743 */
1744 m->m_flags |= M_HASFCS;
1745 m->m_pkthdr.rcvif = ifp;
1746 m->m_pkthdr.len = len;
1747
1748 #if 0 /* XXXJRT */
1749 /*
1750 * If VLANs are enabled, VLAN packets have been unwrapped
1751 * for us. Associate the tag with the packet.
1752 */
1753 if (sc->sc_ethercom.ec_nvlans != 0 &&
1754 (status & WRX_ST_VP) != 0) {
1755 struct m_tag *vtag;
1756
1757 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1758 M_NOWAIT);
1759 if (vtag == NULL) {
1760 ifp->if_ierrors++;
1761 printf("%s: unable to allocate VLAN tag\n",
1762 sc->sc_dev.dv_xname);
1763 m_freem(m);
1764 continue;
1765 }
1766
1767 *(u_int *)(vtag + 1) =
1768 le16toh(sc->sc_rxdescs[i].wrx_special);
1769 }
1770 #endif /* XXXJRT */
1771
1772 /*
1773 * Set up checksum info for this packet.
1774 */
1775 if (status & WRX_ST_IPCS) {
1776 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1777 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1778 if (errors & WRX_ER_IPE)
1779 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1780 }
1781 if (status & WRX_ST_TCPCS) {
1782 /*
1783 * Note: we don't know if this was TCP or UDP,
1784 * so we just set both bits, and expect the
1785 * upper layers to deal.
1786 */
1787 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1788 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1789 if (errors & WRX_ER_TCPE)
1790 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1791 }
1792
1793 ifp->if_ipackets++;
1794
1795 #if NBPFILTER > 0
1796 /* Pass this up to any BPF listeners. */
1797 if (ifp->if_bpf)
1798 bpf_mtap(ifp->if_bpf, m);
1799 #endif /* NBPFILTER > 0 */
1800
1801 /* Pass it on. */
1802 (*ifp->if_input)(ifp, m);
1803 }
1804
1805 /* Update the receive pointer. */
1806 sc->sc_rxptr = i;
1807
1808 DPRINTF(WM_DEBUG_RX,
1809 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1810 }
1811
1812 /*
1813 * wm_linkintr:
1814 *
1815 * Helper; handle link interrupts.
1816 */
1817 void
1818 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1819 {
1820 uint32_t status;
1821
1822 /*
1823 * If we get a link status interrupt on a 1000BASE-T
1824 * device, just fall into the normal MII tick path.
1825 */
1826 if (sc->sc_flags & WM_F_HAS_MII) {
1827 if (icr & ICR_LSC) {
1828 DPRINTF(WM_DEBUG_LINK,
1829 ("%s: LINK: LSC -> mii_tick\n",
1830 sc->sc_dev.dv_xname));
1831 mii_tick(&sc->sc_mii);
1832 } else if (icr & ICR_RXSEQ) {
1833 DPRINTF(WM_DEBUG_LINK,
1834 ("%s: LINK Receive sequence error\n",
1835 sc->sc_dev.dv_xname));
1836 }
1837 return;
1838 }
1839
1840 /*
1841 * If we are now receiving /C/, check for link again in
1842 * a couple of link clock ticks.
1843 */
1844 if (icr & ICR_RXCFG) {
1845 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1846 sc->sc_dev.dv_xname));
1847 sc->sc_tbi_anstate = 2;
1848 }
1849
1850 if (icr & ICR_LSC) {
1851 status = CSR_READ(sc, WMREG_STATUS);
1852 if (status & STATUS_LU) {
1853 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1854 sc->sc_dev.dv_xname,
1855 (status & STATUS_FD) ? "FDX" : "HDX"));
1856 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1857 if (status & STATUS_FD)
1858 sc->sc_tctl |=
1859 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1860 else
1861 sc->sc_tctl |=
1862 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1863 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1864 sc->sc_tbi_linkup = 1;
1865 } else {
1866 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1867 sc->sc_dev.dv_xname));
1868 sc->sc_tbi_linkup = 0;
1869 }
1870 sc->sc_tbi_anstate = 2;
1871 wm_tbi_set_linkled(sc);
1872 } else if (icr & ICR_RXSEQ) {
1873 DPRINTF(WM_DEBUG_LINK,
1874 ("%s: LINK: Receive sequence error\n",
1875 sc->sc_dev.dv_xname));
1876 }
1877 }
1878
1879 /*
1880 * wm_tick:
1881 *
1882 * One second timer, used to check link status, sweep up
1883 * completed transmit jobs, etc.
1884 */
1885 void
1886 wm_tick(void *arg)
1887 {
1888 struct wm_softc *sc = arg;
1889 int s;
1890
1891 s = splnet();
1892
1893 if (sc->sc_flags & WM_F_HAS_MII)
1894 mii_tick(&sc->sc_mii);
1895 else
1896 wm_tbi_check_link(sc);
1897
1898 splx(s);
1899
1900 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1901 }
1902
1903 /*
1904 * wm_reset:
1905 *
1906 * Reset the i82542 chip.
1907 */
1908 void
1909 wm_reset(struct wm_softc *sc)
1910 {
1911 int i;
1912
1913 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1914 delay(10000);
1915
1916 for (i = 0; i < 1000; i++) {
1917 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1918 return;
1919 delay(20);
1920 }
1921
1922 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1923 printf("%s: WARNING: reset failed to complete\n",
1924 sc->sc_dev.dv_xname);
1925 }
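
/*
 * Timing, worked out (illustrative only): the initial delay is 10ms
 * (delay() takes microseconds), after which the loop polls CTRL_RST
 * every 20us for up to 1000 iterations, i.e. a further 20ms, before
 * declaring the reset hung.
 */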
1926
1927 /*
1928 * wm_init: [ifnet interface function]
1929 *
1930 * Initialize the interface. Must be called at splnet().
1931 */
1932 int
1933 wm_init(struct ifnet *ifp)
1934 {
1935 struct wm_softc *sc = ifp->if_softc;
1936 struct wm_rxsoft *rxs;
1937 int i, error = 0;
1938 uint32_t reg;
1939
1940 /* Cancel any pending I/O. */
1941 wm_stop(ifp, 0);
1942
1943 /* Reset the chip to a known state. */
1944 wm_reset(sc);
1945
1946 /* Initialize the transmit descriptor ring. */
1947 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1948 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1949 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1950 sc->sc_txfree = WM_NTXDESC;
1951 sc->sc_txnext = 0;
1952
1953 sc->sc_txctx_ipcs = 0xffffffff;
1954 sc->sc_txctx_tucs = 0xffffffff;
1955
1956 if (sc->sc_type < WM_T_82543) {
1957 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1958 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1959 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1960 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1961 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1962 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1963 } else {
1964 CSR_WRITE(sc, WMREG_TBDAH, 0);
1965 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1966 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1967 CSR_WRITE(sc, WMREG_TDH, 0);
1968 CSR_WRITE(sc, WMREG_TDT, 0);
1969 CSR_WRITE(sc, WMREG_TIDV, 128);
1970
1971 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1972 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1973 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1974 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1975 }
1976 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1977 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1978
1979 /* Initialize the transmit job descriptors. */
1980 for (i = 0; i < WM_TXQUEUELEN; i++)
1981 sc->sc_txsoft[i].txs_mbuf = NULL;
1982 sc->sc_txsfree = WM_TXQUEUELEN;
1983 sc->sc_txsnext = 0;
1984 sc->sc_txsdirty = 0;
1985
1986 /*
1987 * Initialize the receive descriptor and receive job
1988 * descriptor rings.
1989 */
1990 if (sc->sc_type < WM_T_82543) {
1991 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1992 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1993 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1994 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1995 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1996 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1997
1998 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1999 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2000 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2001 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2002 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2003 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2004 } else {
2005 CSR_WRITE(sc, WMREG_RDBAH, 0);
2006 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2007 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2008 CSR_WRITE(sc, WMREG_RDH, 0);
2009 CSR_WRITE(sc, WMREG_RDT, 0);
2010 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2011 }
2012 for (i = 0; i < WM_NRXDESC; i++) {
2013 rxs = &sc->sc_rxsoft[i];
2014 if (rxs->rxs_mbuf == NULL) {
2015 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2016 printf("%s: unable to allocate or map rx "
2017 "buffer %d, error = %d\n",
2018 sc->sc_dev.dv_xname, i, error);
2019 /*
2020 * XXX Should attempt to run with fewer receive
2021 * XXX buffers instead of just failing.
2022 */
2023 wm_rxdrain(sc);
2024 goto out;
2025 }
2026 } else
2027 WM_INIT_RXDESC(sc, i);
2028 }
2029 sc->sc_rxptr = 0;
2030 sc->sc_rxdiscard = 0;
2031 WM_RXCHAIN_RESET(sc);
2032
2033 /*
2034 * Clear out the VLAN table -- we don't use it (yet).
2035 */
2036 CSR_WRITE(sc, WMREG_VET, 0);
2037 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2038 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2039
2040 /*
2041 * Set up flow-control parameters.
2042 *
2043 * XXX Values could probably stand some tuning.
2044 */
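	/*
	 * FCAL/FCAH/FCT identify the 802.3x PAUSE frame (destination
	 * address 01:80:c2:00:00:01, Ethertype 0x8808).  FCRTL and
	 * FCRTH are the receive FIFO low/high water marks that
	 * trigger XON/XOFF transmission, and FCTTV is the pause time
	 * advertised in the XOFF frames we send.
	 */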
2045 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2046 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2047 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2048 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2049
2050 if (sc->sc_type < WM_T_82543) {
2051 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2052 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2053 } else {
2054 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2055 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2056 }
2057 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2058 }
2059
2060 #if 0 /* XXXJRT */
2061 /* Deal with VLAN enables. */
2062 if (sc->sc_ethercom.ec_nvlans != 0)
2063 sc->sc_ctrl |= CTRL_VME;
2064 else
2065 #endif /* XXXJRT */
2066 sc->sc_ctrl &= ~CTRL_VME;
2067
2068 /* Write the control registers. */
2069 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2070 #if 0
2071 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2072 #endif
2073
2074 /*
2075 * Set up checksum offload parameters.
2076 */
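	/*
	 * Note that the logic below keeps RXCSUM_IPOFL set whenever
	 * TCP/UDP offload is enabled, on the theory that the chip
	 * must also verify the IP checksum in order to handle TCP
	 * and UDP; it is cleared only when neither capability is
	 * enabled.
	 */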
2077 reg = CSR_READ(sc, WMREG_RXCSUM);
2078 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2079 reg |= RXCSUM_IPOFL;
2080 else
2081 reg &= ~RXCSUM_IPOFL;
2082 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2083 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2084 else {
2085 reg &= ~RXCSUM_TUOFL;
2086 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2087 reg &= ~RXCSUM_IPOFL;
2088 }
2089 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2090
2091 /*
2092 * Set up the interrupt registers.
2093 */
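	/*
	 * IMC with all bits set masks every interrupt source; IMS
	 * then unmasks just the events we care about: transmit
	 * descriptor write-back, link status change, receive sequence
	 * error, receive descriptor minimum threshold, receiver
	 * overrun, and the receive timer.
	 */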
2094 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2095 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2096 ICR_RXO | ICR_RXT0;
2097 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2098 sc->sc_icr |= ICR_RXCFG;
2099 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2100
2101 /* Set up the inter-packet gap. */
2102 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2103
2104 #if 0 /* XXXJRT */
2105 /* Set the VLAN ethernetype. */
2106 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2107 #endif
2108
2109 /*
2110 * Set up the transmit control register; we start out with
2111 	 * a collision distance suitable for FDX, but update it when
2112 * we resolve the media type.
2113 */
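	/*
	 * TCTL_CT is the collision (retransmit) threshold, and
	 * TCTL_COLD the collision distance, which depends on the
	 * duplex setting; that is why the link-change paths rewrite
	 * the TCTL_COLD field.
	 */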
2114 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2115 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2116 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2117
2118 /* Set the media. */
2119 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2120
2121 /*
2122 * Set up the receive control register; we actually program
2123 * the register when we set the receive filter. Use multicast
2124 * address offset type 0.
2125 *
2126 * Only the i82544 has the ability to strip the incoming
2127 * CRC, so we don't enable that feature.
2128 */
2129 sc->sc_mchash_type = 0;
2130 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2131 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2132
2133 /* Set the receive filter. */
2134 wm_set_filter(sc);
2135
2136 /* Start the one second link check clock. */
2137 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2138
2139 /* ...all done! */
2140 ifp->if_flags |= IFF_RUNNING;
2141 ifp->if_flags &= ~IFF_OACTIVE;
2142
2143 out:
2144 if (error)
2145 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2146 return (error);
2147 }
2148
2149 /*
2150 * wm_rxdrain:
2151 *
2152 * Drain the receive queue.
2153 */
2154 void
2155 wm_rxdrain(struct wm_softc *sc)
2156 {
2157 struct wm_rxsoft *rxs;
2158 int i;
2159
2160 for (i = 0; i < WM_NRXDESC; i++) {
2161 rxs = &sc->sc_rxsoft[i];
2162 if (rxs->rxs_mbuf != NULL) {
2163 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2164 m_freem(rxs->rxs_mbuf);
2165 rxs->rxs_mbuf = NULL;
2166 }
2167 }
2168 }
2169
2170 /*
2171 * wm_stop: [ifnet interface function]
2172 *
2173 * Stop transmission on the interface.
2174 */
2175 void
2176 wm_stop(struct ifnet *ifp, int disable)
2177 {
2178 struct wm_softc *sc = ifp->if_softc;
2179 struct wm_txsoft *txs;
2180 int i;
2181
2182 /* Stop the one second clock. */
2183 callout_stop(&sc->sc_tick_ch);
2184
2185 if (sc->sc_flags & WM_F_HAS_MII) {
2186 /* Down the MII. */
2187 mii_down(&sc->sc_mii);
2188 }
2189
2190 /* Stop the transmit and receive processes. */
2191 CSR_WRITE(sc, WMREG_TCTL, 0);
2192 CSR_WRITE(sc, WMREG_RCTL, 0);
2193
2194 /* Release any queued transmit buffers. */
2195 for (i = 0; i < WM_TXQUEUELEN; i++) {
2196 txs = &sc->sc_txsoft[i];
2197 if (txs->txs_mbuf != NULL) {
2198 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2199 m_freem(txs->txs_mbuf);
2200 txs->txs_mbuf = NULL;
2201 }
2202 }
2203
2204 if (disable)
2205 wm_rxdrain(sc);
2206
2207 /* Mark the interface as down and cancel the watchdog timer. */
2208 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2209 ifp->if_timer = 0;
2210 }
2211
2212 /*
2213 * wm_read_eeprom:
2214 *
2215 * Read data from the serial EEPROM.
2216 */
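/*
 * The EEPROM is a serial Microwire device, bit-banged through the EECD
 * register: for each word, assert chip select, clock out the three-bit
 * READ opcode and the (6- or 8-bit) word address on DI, then clock in
 * 16 data bits MSB-first on DO.  On chips that share the EEPROM
 * (WM_F_EEPROM_HANDSHAKE), access must first be requested with
 * EECD_EE_REQ and granted via EECD_EE_GNT before the cycle begins.
 */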
2217 void
2218 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2219 {
2220 uint32_t reg;
2221 int i, x, addrbits = 6;
2222
2223 for (i = 0; i < wordcnt; i++) {
2224 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2225 reg = CSR_READ(sc, WMREG_EECD);
2226
2227 /* Get number of address bits. */
2228 if (reg & EECD_EE_SIZE)
2229 addrbits = 8;
2230
2231 /* Request EEPROM access. */
2232 reg |= EECD_EE_REQ;
2233 CSR_WRITE(sc, WMREG_EECD, reg);
2234
2235 			/* ...and wait for it to be granted. */
2236 for (x = 0; x < 100; x++) {
2237 reg = CSR_READ(sc, WMREG_EECD);
2238 if (reg & EECD_EE_GNT)
2239 break;
2240 delay(5);
2241 }
2242 if ((reg & EECD_EE_GNT) == 0) {
2243 printf("%s: could not acquire EEPROM GNT\n",
2244 sc->sc_dev.dv_xname);
2245 *data = 0xffff;
2246 reg &= ~EECD_EE_REQ;
2247 CSR_WRITE(sc, WMREG_EECD, reg);
2248 continue;
2249 }
2250 } else
2251 reg = 0;
2252
2253 /* Clear SK and DI. */
2254 reg &= ~(EECD_SK | EECD_DI);
2255 CSR_WRITE(sc, WMREG_EECD, reg);
2256
2257 /* Set CHIP SELECT. */
2258 reg |= EECD_CS;
2259 CSR_WRITE(sc, WMREG_EECD, reg);
2260 delay(2);
2261
2262 /* Shift in the READ command. */
2263 for (x = 3; x > 0; x--) {
2264 if (UWIRE_OPC_READ & (1 << (x - 1)))
2265 reg |= EECD_DI;
2266 else
2267 reg &= ~EECD_DI;
2268 CSR_WRITE(sc, WMREG_EECD, reg);
2269 delay(2);
2270 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2271 delay(2);
2272 CSR_WRITE(sc, WMREG_EECD, reg);
2273 delay(2);
2274 }
2275
2276 /* Shift in address. */
2277 for (x = addrbits; x > 0; x--) {
2278 if ((word + i) & (1 << (x - 1)))
2279 reg |= EECD_DI;
2280 else
2281 reg &= ~EECD_DI;
2282 CSR_WRITE(sc, WMREG_EECD, reg);
2283 delay(2);
2284 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2285 delay(2);
2286 CSR_WRITE(sc, WMREG_EECD, reg);
2287 delay(2);
2288 }
2289
2290 /* Shift out the data. */
2291 reg &= ~EECD_DI;
2292 data[i] = 0;
2293 for (x = 16; x > 0; x--) {
2294 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2295 delay(2);
2296 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2297 data[i] |= (1 << (x - 1));
2298 CSR_WRITE(sc, WMREG_EECD, reg);
2299 delay(2);
2300 }
2301
2302 /* Clear CHIP SELECT. */
2303 reg &= ~EECD_CS;
2304 CSR_WRITE(sc, WMREG_EECD, reg);
2305 delay(2);
2306
2307 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2308 /* Release the EEPROM. */
2309 reg &= ~EECD_EE_REQ;
2310 CSR_WRITE(sc, WMREG_EECD, reg);
2311 }
2312 }
2313 }
2314
2315 /*
2316 * wm_add_rxbuf:
2317 *
2318  *	Add a receive buffer to the indicated descriptor.
2319 */
2320 int
2321 wm_add_rxbuf(struct wm_softc *sc, int idx)
2322 {
2323 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2324 struct mbuf *m;
2325 int error;
2326
2327 MGETHDR(m, M_DONTWAIT, MT_DATA);
2328 if (m == NULL)
2329 return (ENOBUFS);
2330
2331 MCLGET(m, M_DONTWAIT);
2332 if ((m->m_flags & M_EXT) == 0) {
2333 m_freem(m);
2334 return (ENOBUFS);
2335 }
2336
2337 if (rxs->rxs_mbuf != NULL)
2338 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2339
2340 rxs->rxs_mbuf = m;
2341
2342 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2343 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2344 BUS_DMA_READ|BUS_DMA_NOWAIT);
2345 if (error) {
2346 printf("%s: unable to load rx DMA map %d, error = %d\n",
2347 sc->sc_dev.dv_xname, idx, error);
2348 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2349 }
2350
2351 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2352 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2353
2354 WM_INIT_RXDESC(sc, idx);
2355
2356 return (0);
2357 }
2358
2359 /*
2360 * wm_set_ral:
2361 *
2362  *	Set an entry in the receive address list.
2363 */
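/*
 * Each receive address list entry is a register pair: the low 32 bits
 * of the station address go in RAL_LO and the high 16 bits in RAL_HI,
 * together with the address-valid bit; writing a zeroed pair disables
 * the slot.
 */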
2364 static void
2365 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2366 {
2367 uint32_t ral_lo, ral_hi;
2368
2369 if (enaddr != NULL) {
2370 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2371 (enaddr[3] << 24);
2372 ral_hi = enaddr[4] | (enaddr[5] << 8);
2373 ral_hi |= RAL_AV;
2374 } else {
2375 ral_lo = 0;
2376 ral_hi = 0;
2377 }
2378
2379 if (sc->sc_type >= WM_T_82544) {
2380 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2381 ral_lo);
2382 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2383 ral_hi);
2384 } else {
2385 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2386 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2387 }
2388 }
2389
2390 /*
2391 * wm_mchash:
2392 *
2393 * Compute the hash of the multicast address for the 4096-bit
2394 * multicast filter.
2395 */
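/*
 * The 12-bit hash is drawn from bytes 4 and 5 of the address, with the
 * bit boundary between them selected by the multicast offset type.
 * For example, with type 0 and address 01:00:5e:00:00:01 the hash is
 * (0x00 >> 4) | (0x01 << 4) = 0x010; wm_set_filter() then uses the
 * upper 7 bits to pick one of 128 32-bit MTA registers and the low 5
 * bits to pick a bit within it.
 */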
2396 static uint32_t
2397 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2398 {
2399 static const int lo_shift[4] = { 4, 3, 2, 0 };
2400 static const int hi_shift[4] = { 4, 5, 6, 8 };
2401 uint32_t hash;
2402
2403 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2404 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2405
2406 return (hash & 0xfff);
2407 }
2408
2409 /*
2410 * wm_set_filter:
2411 *
2412 * Set up the receive filter.
2413 */
2414 void
2415 wm_set_filter(struct wm_softc *sc)
2416 {
2417 struct ethercom *ec = &sc->sc_ethercom;
2418 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2419 struct ether_multi *enm;
2420 struct ether_multistep step;
2421 bus_addr_t mta_reg;
2422 uint32_t hash, reg, bit;
2423 int i;
2424
2425 if (sc->sc_type >= WM_T_82544)
2426 mta_reg = WMREG_CORDOVA_MTA;
2427 else
2428 mta_reg = WMREG_MTA;
2429
2430 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2431
2432 if (ifp->if_flags & IFF_BROADCAST)
2433 sc->sc_rctl |= RCTL_BAM;
2434 if (ifp->if_flags & IFF_PROMISC) {
2435 sc->sc_rctl |= RCTL_UPE;
2436 goto allmulti;
2437 }
2438
2439 /*
2440 * Set the station address in the first RAL slot, and
2441 * clear the remaining slots.
2442 */
2443 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2444 for (i = 1; i < WM_RAL_TABSIZE; i++)
2445 wm_set_ral(sc, NULL, i);
2446
2447 /* Clear out the multicast table. */
2448 for (i = 0; i < WM_MC_TABSIZE; i++)
2449 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2450
2451 ETHER_FIRST_MULTI(step, ec, enm);
2452 while (enm != NULL) {
2453 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2454 /*
2455 * We must listen to a range of multicast addresses.
2456 * For now, just accept all multicasts, rather than
2457 * trying to set only those filter bits needed to match
2458 * the range. (At this time, the only use of address
2459 * ranges is for IP multicast routing, for which the
2460 * range is big enough to require all bits set.)
2461 */
2462 goto allmulti;
2463 }
2464
2465 hash = wm_mchash(sc, enm->enm_addrlo);
2466
2467 reg = (hash >> 5) & 0x7f;
2468 bit = hash & 0x1f;
2469
2470 hash = CSR_READ(sc, mta_reg + (reg << 2));
2471 hash |= 1U << bit;
2472
2473 		/* XXX 82544 erratum: an odd MTA write can clobber the previous register. */
2474 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
2475 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2476 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2477 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2478 } else
2479 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2480
2481 ETHER_NEXT_MULTI(step, enm);
2482 }
2483
2484 ifp->if_flags &= ~IFF_ALLMULTI;
2485 goto setit;
2486
2487 allmulti:
2488 ifp->if_flags |= IFF_ALLMULTI;
2489 sc->sc_rctl |= RCTL_MPE;
2490
2491 setit:
2492 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2493 }
2494
2495 /*
2496 * wm_tbi_mediainit:
2497 *
2498 * Initialize media for use on 1000BASE-X devices.
2499 */
2500 void
2501 wm_tbi_mediainit(struct wm_softc *sc)
2502 {
2503 const char *sep = "";
2504
2505 if (sc->sc_type < WM_T_82543)
2506 sc->sc_tipg = TIPG_WM_DFLT;
2507 else
2508 sc->sc_tipg = TIPG_LG_DFLT;
2509
2510 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2511 wm_tbi_mediastatus);
2512
2513 /*
2514 * SWD Pins:
2515 *
2516 * 0 = Link LED (output)
2517 * 1 = Loss Of Signal (input)
2518 */
2519 sc->sc_ctrl |= CTRL_SWDPIO(0);
2520 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2521
2522 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2523
2524 #define ADD(ss, mm, dd) \
2525 do { \
2526 printf("%s%s", sep, ss); \
2527 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2528 sep = ", "; \
2529 } while (/*CONSTCOND*/0)
2530
2531 printf("%s: ", sc->sc_dev.dv_xname);
2532 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2533 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2534 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2535 printf("\n");
2536
2537 #undef ADD
2538
2539 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2540 }
2541
2542 /*
2543 * wm_tbi_mediastatus: [ifmedia interface function]
2544 *
2545 * Get the current interface media status on a 1000BASE-X device.
2546 */
2547 void
2548 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2549 {
2550 struct wm_softc *sc = ifp->if_softc;
2551
2552 ifmr->ifm_status = IFM_AVALID;
2553 ifmr->ifm_active = IFM_ETHER;
2554
2555 if (sc->sc_tbi_linkup == 0) {
2556 ifmr->ifm_active |= IFM_NONE;
2557 return;
2558 }
2559
2560 ifmr->ifm_status |= IFM_ACTIVE;
2561 ifmr->ifm_active |= IFM_1000_SX;
2562 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2563 ifmr->ifm_active |= IFM_FDX;
2564 }
2565
2566 /*
2567 * wm_tbi_mediachange: [ifmedia interface function]
2568 *
2569 * Set hardware to newly-selected media on a 1000BASE-X device.
2570 */
2571 int
2572 wm_tbi_mediachange(struct ifnet *ifp)
2573 {
2574 struct wm_softc *sc = ifp->if_softc;
2575 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2576 uint32_t status;
2577 int i;
2578
2579 sc->sc_txcw = ife->ifm_data;
2580 if (sc->sc_ctrl & CTRL_RFCE)
2581 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2582 if (sc->sc_ctrl & CTRL_TFCE)
2583 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2584 sc->sc_txcw |= TXCW_ANE;
2585
2586 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2587 delay(10000);
2588
2589 sc->sc_tbi_anstate = 0;
2590
2591 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2592 /* Have signal; wait for the link to come up. */
2593 for (i = 0; i < 50; i++) {
2594 delay(10000);
2595 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2596 break;
2597 }
2598
2599 status = CSR_READ(sc, WMREG_STATUS);
2600 if (status & STATUS_LU) {
2601 /* Link is up. */
2602 DPRINTF(WM_DEBUG_LINK,
2603 ("%s: LINK: set media -> link up %s\n",
2604 sc->sc_dev.dv_xname,
2605 (status & STATUS_FD) ? "FDX" : "HDX"));
2606 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2607 if (status & STATUS_FD)
2608 sc->sc_tctl |=
2609 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2610 else
2611 sc->sc_tctl |=
2612 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2613 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2614 sc->sc_tbi_linkup = 1;
2615 } else {
2616 /* Link is down. */
2617 DPRINTF(WM_DEBUG_LINK,
2618 ("%s: LINK: set media -> link down\n",
2619 sc->sc_dev.dv_xname));
2620 sc->sc_tbi_linkup = 0;
2621 }
2622 } else {
2623 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2624 sc->sc_dev.dv_xname));
2625 sc->sc_tbi_linkup = 0;
2626 }
2627
2628 wm_tbi_set_linkled(sc);
2629
2630 return (0);
2631 }
2632
2633 /*
2634 * wm_tbi_set_linkled:
2635 *
2636 * Update the link LED on 1000BASE-X devices.
2637 */
2638 void
2639 wm_tbi_set_linkled(struct wm_softc *sc)
2640 {
2641
2642 if (sc->sc_tbi_linkup)
2643 sc->sc_ctrl |= CTRL_SWDPIN(0);
2644 else
2645 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2646
2647 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2648 }
2649
2650 /*
2651 * wm_tbi_check_link:
2652 *
2653 * Check the link on 1000BASE-X devices.
2654 */
2655 void
2656 wm_tbi_check_link(struct wm_softc *sc)
2657 {
2658 uint32_t rxcw, ctrl, status;
2659
2660 if (sc->sc_tbi_anstate == 0)
2661 return;
2662 else if (sc->sc_tbi_anstate > 1) {
2663 DPRINTF(WM_DEBUG_LINK,
2664 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2665 sc->sc_tbi_anstate));
2666 sc->sc_tbi_anstate--;
2667 return;
2668 }
2669
2670 sc->sc_tbi_anstate = 0;
2671
2672 rxcw = CSR_READ(sc, WMREG_RXCW);
2673 ctrl = CSR_READ(sc, WMREG_CTRL);
2674 status = CSR_READ(sc, WMREG_STATUS);
2675
2676 if ((status & STATUS_LU) == 0) {
2677 DPRINTF(WM_DEBUG_LINK,
2678 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2679 sc->sc_tbi_linkup = 0;
2680 } else {
2681 DPRINTF(WM_DEBUG_LINK,
2682 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2683 (status & STATUS_FD) ? "FDX" : "HDX"));
2684 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2685 if (status & STATUS_FD)
2686 sc->sc_tctl |=
2687 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2688 else
2689 sc->sc_tctl |=
2690 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2691 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2692 sc->sc_tbi_linkup = 1;
2693 }
2694
2695 wm_tbi_set_linkled(sc);
2696 }
2697
2698 /*
2699 * wm_gmii_reset:
2700 *
2701 * Reset the PHY.
2702 */
2703 void
2704 wm_gmii_reset(struct wm_softc *sc)
2705 {
2706 uint32_t reg;
2707
2708 if (sc->sc_type >= WM_T_82544) {
2709 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2710 delay(20000);
2711
2712 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2713 delay(20000);
2714 } else {
2715 /* The PHY reset pin is active-low. */
2716 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2717 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2718 CTRL_EXT_SWDPIN(4));
2719 reg |= CTRL_EXT_SWDPIO(4);
2720
2721 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2722 delay(10);
2723
2724 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2725 delay(10);
2726
2727 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2728 delay(10);
2729 #if 0
2730 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2731 #endif
2732 }
2733 }
2734
2735 /*
2736 * wm_gmii_mediainit:
2737 *
2738 * Initialize media for use on 1000BASE-T devices.
2739 */
2740 void
2741 wm_gmii_mediainit(struct wm_softc *sc)
2742 {
2743 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2744
2745 /* We have MII. */
2746 sc->sc_flags |= WM_F_HAS_MII;
2747
2748 sc->sc_tipg = TIPG_1000T_DFLT;
2749
2750 /*
2751 * Let the chip set speed/duplex on its own based on
2752 * signals from the PHY.
2753 */
2754 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2755 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2756
2757 /* Initialize our media structures and probe the GMII. */
2758 sc->sc_mii.mii_ifp = ifp;
2759
2760 if (sc->sc_type >= WM_T_82544) {
2761 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2762 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2763 } else {
2764 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2765 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2766 }
2767 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2768
2769 wm_gmii_reset(sc);
2770
2771 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2772 wm_gmii_mediastatus);
2773
2774 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2775 MII_OFFSET_ANY, 0);
2776 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2777 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2778 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2779 } else
2780 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2781 }
2782
2783 /*
2784 * wm_gmii_mediastatus: [ifmedia interface function]
2785 *
2786 * Get the current interface media status on a 1000BASE-T device.
2787 */
2788 void
2789 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2790 {
2791 struct wm_softc *sc = ifp->if_softc;
2792
2793 mii_pollstat(&sc->sc_mii);
2794 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2795 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2796 }
2797
2798 /*
2799 * wm_gmii_mediachange: [ifmedia interface function]
2800 *
2801 * Set hardware to newly-selected media on a 1000BASE-T device.
2802 */
2803 int
2804 wm_gmii_mediachange(struct ifnet *ifp)
2805 {
2806 struct wm_softc *sc = ifp->if_softc;
2807
2808 if (ifp->if_flags & IFF_UP)
2809 mii_mediachg(&sc->sc_mii);
2810 return (0);
2811 }
2812
2813 #define MDI_IO CTRL_SWDPIN(2)
2814 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2815 #define MDI_CLK CTRL_SWDPIN(3)
2816
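/*
 * On the i82543 the MII management interface is bit-banged through
 * software-definable pins in the CTRL register: MDI_IO carries the
 * data, MDI_CLK the management clock, and MDI_DIR the I/O pin's
 * direction.  Each bit is presented on (or sampled from) MDI_IO and
 * strobed by pulsing MDI_CLK high and then low.
 */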
2817 static void
2818 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2819 {
2820 uint32_t i, v;
2821
2822 v = CSR_READ(sc, WMREG_CTRL);
2823 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2824 v |= MDI_DIR | CTRL_SWDPIO(3);
2825
2826 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2827 if (data & i)
2828 v |= MDI_IO;
2829 else
2830 v &= ~MDI_IO;
2831 CSR_WRITE(sc, WMREG_CTRL, v);
2832 delay(10);
2833 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2834 delay(10);
2835 CSR_WRITE(sc, WMREG_CTRL, v);
2836 delay(10);
2837 }
2838 }
2839
2840 static uint32_t
2841 i82543_mii_recvbits(struct wm_softc *sc)
2842 {
2843 uint32_t v, i, data = 0;
2844
2845 v = CSR_READ(sc, WMREG_CTRL);
2846 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2847 v |= CTRL_SWDPIO(3);
2848
2849 CSR_WRITE(sc, WMREG_CTRL, v);
2850 delay(10);
2851 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2852 delay(10);
2853 CSR_WRITE(sc, WMREG_CTRL, v);
2854 delay(10);
2855
2856 for (i = 0; i < 16; i++) {
2857 data <<= 1;
2858 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2859 delay(10);
2860 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2861 data |= 1;
2862 CSR_WRITE(sc, WMREG_CTRL, v);
2863 delay(10);
2864 }
2865
2866 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2867 delay(10);
2868 CSR_WRITE(sc, WMREG_CTRL, v);
2869 delay(10);
2870
2871 return (data);
2872 }
2873
2874 #undef MDI_IO
2875 #undef MDI_DIR
2876 #undef MDI_CLK
2877
2878 /*
2879 * wm_gmii_i82543_readreg: [mii interface function]
2880 *
2881 * Read a PHY register on the GMII (i82543 version).
2882 */
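/*
 * This follows the IEEE 802.3 clause 22 MDIO frame format: a preamble
 * of 32 ones, then the 2-bit start and opcode fields and the 5-bit PHY
 * and register addresses (14 bits in all for a read), after which the
 * PHY drives the turnaround and the 16 data bits back to us.
 */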
2883 int
2884 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2885 {
2886 struct wm_softc *sc = (void *) self;
2887 int rv;
2888
2889 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2890 i82543_mii_sendbits(sc, reg | (phy << 5) |
2891 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2892 rv = i82543_mii_recvbits(sc) & 0xffff;
2893
2894 DPRINTF(WM_DEBUG_GMII,
2895 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2896 sc->sc_dev.dv_xname, phy, reg, rv));
2897
2898 return (rv);
2899 }
2900
2901 /*
2902 * wm_gmii_i82543_writereg: [mii interface function]
2903 *
2904 * Write a PHY register on the GMII (i82543 version).
2905 */
2906 void
2907 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2908 {
2909 struct wm_softc *sc = (void *) self;
2910
2911 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2912 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2913 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2914 (MII_COMMAND_START << 30), 32);
2915 }
2916
2917 /*
2918 * wm_gmii_i82544_readreg: [mii interface function]
2919 *
2920 * Read a PHY register on the GMII.
2921 */
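/*
 * The i82544 and later implement the MDIO protocol in hardware: the
 * opcode, PHY address, and register address are written to the MDIC
 * register, software polls for MDIC_READY, and the result (or an
 * MDIC_E error indication) is read back from the same register.
 */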
2922 int
2923 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2924 {
2925 struct wm_softc *sc = (void *) self;
2926 uint32_t mdic;
2927 int i, rv;
2928
2929 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2930 MDIC_REGADD(reg));
2931
2932 for (i = 0; i < 100; i++) {
2933 mdic = CSR_READ(sc, WMREG_MDIC);
2934 if (mdic & MDIC_READY)
2935 break;
2936 delay(10);
2937 }
2938
2939 if ((mdic & MDIC_READY) == 0) {
2940 printf("%s: MDIC read timed out: phy %d reg %d\n",
2941 sc->sc_dev.dv_xname, phy, reg);
2942 rv = 0;
2943 } else if (mdic & MDIC_E) {
2944 #if 0 /* This is normal if no PHY is present. */
2945 printf("%s: MDIC read error: phy %d reg %d\n",
2946 sc->sc_dev.dv_xname, phy, reg);
2947 #endif
2948 rv = 0;
2949 } else {
2950 rv = MDIC_DATA(mdic);
2951 if (rv == 0xffff)
2952 rv = 0;
2953 }
2954
2955 return (rv);
2956 }
2957
2958 /*
2959 * wm_gmii_i82544_writereg: [mii interface function]
2960 *
2961 * Write a PHY register on the GMII.
2962 */
2963 void
2964 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2965 {
2966 struct wm_softc *sc = (void *) self;
2967 uint32_t mdic;
2968 int i;
2969
2970 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2971 MDIC_REGADD(reg) | MDIC_DATA(val));
2972
2973 for (i = 0; i < 100; i++) {
2974 mdic = CSR_READ(sc, WMREG_MDIC);
2975 if (mdic & MDIC_READY)
2976 break;
2977 delay(10);
2978 }
2979
2980 if ((mdic & MDIC_READY) == 0)
2981 printf("%s: MDIC write timed out: phy %d reg %d\n",
2982 sc->sc_dev.dv_xname, phy, reg);
2983 else if (mdic & MDIC_E)
2984 printf("%s: MDIC write error: phy %d reg %d\n",
2985 sc->sc_dev.dv_xname, phy, reg);
2986 }
2987
2988 /*
2989 * wm_gmii_statchg: [mii interface function]
2990 *
2991 * Callback from MII layer when media changes.
2992 */
2993 void
2994 wm_gmii_statchg(struct device *self)
2995 {
2996 struct wm_softc *sc = (void *) self;
2997
2998 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2999
3000 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3001 DPRINTF(WM_DEBUG_LINK,
3002 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3003 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3004 } else {
3005 DPRINTF(WM_DEBUG_LINK,
3006 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3007 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3008 }
3009
3010 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3011 }
3012