/*	$NetBSD: if_wm.c,v 1.43 2003/10/17 20:57:32 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.43 2003/10/17 20:57:32 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
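
/*
 * Both ring sizes are powers of two, so the "add one, then mask" macros
 * wrap for free: WM_NEXTTX(255) == 0 and WM_NEXTTXS(63) == 0.  Note
 * also that 64 jobs of up to 16 segments each could demand 1024
 * descriptors, far more than the 256 available, which is why free
 * descriptors and free jobs are accounted separately (sc_txfree vs.
 * sc_txsfree below).
 */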

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
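
/*
 * For example, WM_CDTXOFF(4) is the byte offset of the fifth Tx
 * descriptor within the clump, i.e. 4 * sizeof(wiseman_txdesc_t), and
 * WM_CDRXOFF(0) is the offset of the first Rx descriptor, which begins
 * immediately after the 256 Tx descriptors.  Adding these offsets to
 * the clump's single DMA segment base address yields the bus addresses
 * programmed into the chip (see WM_CDTXADDR()/WM_CDRXADDR() below).
 */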

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
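
/*
 * The Rx chain is built with a classic tail-pointer scheme: sc_rxtailp
 * always points at the m_next field that terminates the chain, so
 * WM_RXCHAIN_LINK() appends an mbuf in constant time.  Starting from a
 * freshly reset chain, linking m1 and then m2 leaves sc_rxhead == m1,
 * m1->m_next == m2, sc_rxtail == m2, and sc_rxtailp == &m2->m_next.
 */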

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
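
/*
 * All chip registers are accessed as 32-bit quantities through the
 * memory-mapped register window; e.g. CSR_READ(sc, WMREG_STATUS)
 * performs a single 4-byte read at offset WMREG_STATUS from the base
 * of the mapping established in wm_attach().
 */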

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
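
/*
 * WM_CDTXSYNC() splits a sync that would run off the end of the ring
 * into two bus_dmamap_sync() calls.  For example, syncing 8 descriptors
 * starting at index 252 (with WM_NTXDESC == 256) syncs descriptors
 * 252..255 first, then descriptors 0..3.
 */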

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
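
	/*
	 * myea[] holds the address as three little-endian 16-bit words;
	 * e.g. an address beginning 00:11:22:... is stored as
	 * myea[0] == 0x1100, myea[1] == 0x3322, and so on.  Unpack it
	 * one byte at a time below.
	 */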
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}
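
/*
 * Note that the cached context values are invalidated by setting
 * sc_txctx_ipcs and sc_txctx_tucs to 0xffffffff in wm_init(), so the
 * first checksum-offloaded packet after an interface reset always
 * loads a fresh context descriptor (and bumps the "txctx init" event
 * counter rather than "txctx miss").
 */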

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}
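		/*
		 * Worked example of the check above: a packet mapped to
		 * 10 segments passes only if sc_txfree is at least 12 --
		 * the 10 data descriptors, plus the one descriptor always
		 * kept free for the TDT semantics, plus one for a possible
		 * checksum context reload in wm_tx_cksum().
		 */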

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
1499 ("%s: TX: got TDXW interrupt\n",
1500 sc->sc_dev.dv_xname));
1501 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1502 }
1503 #endif
1504 wm_txintr(sc);
1505
1506 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1507 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1508 wm_linkintr(sc, icr);
1509 }
1510
1511 if (icr & ICR_RXO) {
1512 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1513 wantinit = 1;
1514 }
1515 }
1516
1517 if (handled) {
1518 if (wantinit)
1519 wm_init(ifp);
1520
1521 /* Try to get more packets going. */
1522 wm_start(ifp);
1523 }
1524
1525 return (handled);
1526 }
1527
1528 /*
1529 * wm_txintr:
1530 *
1531 * Helper; handle transmit interrupts.
1532 */
1533 void
1534 wm_txintr(struct wm_softc *sc)
1535 {
1536 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1537 struct wm_txsoft *txs;
1538 uint8_t status;
1539 int i;
1540
1541 ifp->if_flags &= ~IFF_OACTIVE;
1542
1543 /*
1544 * Go through the Tx list and free mbufs for those
1545 * frames which have been transmitted.
1546 */
1547 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1548 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1549 txs = &sc->sc_txsoft[i];
1550
1551 DPRINTF(WM_DEBUG_TX,
1552 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1553
1554 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1555 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1556
1557 status = le32toh(sc->sc_txdescs[
1558 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1559 if ((status & WTX_ST_DD) == 0) {
1560 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1561 BUS_DMASYNC_PREREAD);
1562 break;
1563 }
1564
1565 DPRINTF(WM_DEBUG_TX,
1566 ("%s: TX: job %d done: descs %d..%d\n",
1567 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1568 txs->txs_lastdesc));
1569
1570 /*
1571 * XXX We should probably be using the statistics
1572 * XXX registers, but I don't know if they exist
1573 * XXX on chips before the i82544.
1574 */
1575
1576 #ifdef WM_EVENT_COUNTERS
1577 if (status & WTX_ST_TU)
1578 WM_EVCNT_INCR(&sc->sc_ev_tu);
1579 #endif /* WM_EVENT_COUNTERS */
1580
1581 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1582 ifp->if_oerrors++;
1583 if (status & WTX_ST_LC)
1584 printf("%s: late collision\n",
1585 sc->sc_dev.dv_xname);
1586 else if (status & WTX_ST_EC) {
1587 ifp->if_collisions += 16;
1588 printf("%s: excessive collisions\n",
1589 sc->sc_dev.dv_xname);
1590 }
1591 } else
1592 ifp->if_opackets++;
1593
1594 sc->sc_txfree += txs->txs_ndesc;
1595 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1596 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1597 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1598 m_freem(txs->txs_mbuf);
1599 txs->txs_mbuf = NULL;
1600 }
1601
1602 /* Update the dirty transmit buffer pointer. */
1603 sc->sc_txsdirty = i;
1604 DPRINTF(WM_DEBUG_TX,
1605 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1606
1607 /*
1608 * If there are no more pending transmissions, cancel the watchdog
1609 * timer.
1610 */
1611 if (sc->sc_txsfree == WM_TXQUEUELEN)
1612 ifp->if_timer = 0;
1613 }
1614
1615 /*
1616 * wm_rxintr:
1617 *
1618 * Helper; handle receive interrupts.
1619 */
1620 void
1621 wm_rxintr(struct wm_softc *sc)
1622 {
1623 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1624 struct wm_rxsoft *rxs;
1625 struct mbuf *m;
1626 int i, len;
1627 uint8_t status, errors;
1628
1629 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1630 rxs = &sc->sc_rxsoft[i];
1631
1632 DPRINTF(WM_DEBUG_RX,
1633 ("%s: RX: checking descriptor %d\n",
1634 sc->sc_dev.dv_xname, i));
1635
1636 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1637
1638 status = sc->sc_rxdescs[i].wrx_status;
1639 errors = sc->sc_rxdescs[i].wrx_errors;
1640 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1641
1642 if ((status & WRX_ST_DD) == 0) {
1643 /*
1644 * We have processed all of the receive descriptors.
1645 */
1646 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1647 break;
1648 }
1649
1650 if (__predict_false(sc->sc_rxdiscard)) {
1651 DPRINTF(WM_DEBUG_RX,
1652 ("%s: RX: discarding contents of descriptor %d\n",
1653 sc->sc_dev.dv_xname, i));
1654 WM_INIT_RXDESC(sc, i);
1655 if (status & WRX_ST_EOP) {
1656 /* Reset our state. */
1657 DPRINTF(WM_DEBUG_RX,
1658 ("%s: RX: resetting rxdiscard -> 0\n",
1659 sc->sc_dev.dv_xname));
1660 sc->sc_rxdiscard = 0;
1661 }
1662 continue;
1663 }
1664
1665 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1666 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1667
1668 m = rxs->rxs_mbuf;
1669
1670 /*
1671 * Add a new receive buffer to the ring.
1672 */
1673 if (wm_add_rxbuf(sc, i) != 0) {
1674 /*
1675 * Failed, throw away what we've done so
1676 * far, and discard the rest of the packet.
1677 */
1678 ifp->if_ierrors++;
1679 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1680 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1681 WM_INIT_RXDESC(sc, i);
1682 if ((status & WRX_ST_EOP) == 0)
1683 sc->sc_rxdiscard = 1;
1684 if (sc->sc_rxhead != NULL)
1685 m_freem(sc->sc_rxhead);
1686 WM_RXCHAIN_RESET(sc);
1687 DPRINTF(WM_DEBUG_RX,
1688 ("%s: RX: Rx buffer allocation failed, "
1689 "dropping packet%s\n", sc->sc_dev.dv_xname,
1690 sc->sc_rxdiscard ? " (discard)" : ""));
1691 continue;
1692 }
1693
1694 WM_RXCHAIN_LINK(sc, m);
1695
1696 m->m_len = len;
1697
1698 DPRINTF(WM_DEBUG_RX,
1699 ("%s: RX: buffer at %p len %d\n",
1700 sc->sc_dev.dv_xname, m->m_data, len));
1701
1702 /*
1703 * If this is not the end of the packet, keep
1704 * looking.
1705 */
1706 if ((status & WRX_ST_EOP) == 0) {
1707 sc->sc_rxlen += len;
1708 DPRINTF(WM_DEBUG_RX,
1709 ("%s: RX: not yet EOP, rxlen -> %d\n",
1710 sc->sc_dev.dv_xname, sc->sc_rxlen));
1711 continue;
1712 }
1713
1714 /*
1715 * Okay, we have the entire packet now...
1716 */
1717 *sc->sc_rxtailp = NULL;
1718 m = sc->sc_rxhead;
1719 len += sc->sc_rxlen;
1720
1721 WM_RXCHAIN_RESET(sc);
1722
1723 DPRINTF(WM_DEBUG_RX,
1724 ("%s: RX: have entire packet, len -> %d\n",
1725 sc->sc_dev.dv_xname, len));
1726
1727 /*
1728 * If an error occurred, update stats and drop the packet.
1729 */
1730 if (errors &
1731 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1732 ifp->if_ierrors++;
1733 if (errors & WRX_ER_SE)
1734 printf("%s: symbol error\n",
1735 sc->sc_dev.dv_xname);
1736 else if (errors & WRX_ER_SEQ)
1737 printf("%s: receive sequence error\n",
1738 sc->sc_dev.dv_xname);
1739 else if (errors & WRX_ER_CE)
1740 printf("%s: CRC error\n",
1741 sc->sc_dev.dv_xname);
1742 m_freem(m);
1743 continue;
1744 }
1745
1746 /*
1747 * No errors. Receive the packet.
1748 *
1749 * Note, we have configured the chip to include the
1750 * CRC with every packet.
1751 */
1752 m->m_flags |= M_HASFCS;
1753 m->m_pkthdr.rcvif = ifp;
1754 m->m_pkthdr.len = len;
1755
1756 #if 0 /* XXXJRT */
1757 /*
1758 * If VLANs are enabled, VLAN packets have been unwrapped
1759 * for us. Associate the tag with the packet.
1760 */
1761 if (sc->sc_ethercom.ec_nvlans != 0 &&
1762 (status & WRX_ST_VP) != 0) {
1763 struct m_tag *vtag;
1764
1765 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1766 M_NOWAIT);
1767 if (vtag == NULL) {
1768 ifp->if_ierrors++;
1769 printf("%s: unable to allocate VLAN tag\n",
1770 sc->sc_dev.dv_xname);
1771 m_freem(m);
1772 continue;
1773 }
1774
1775 *(u_int *)(vtag + 1) =
1776 le16toh(sc->sc_rxdescs[i].wrx_special);
1777 }
1778 #endif /* XXXJRT */
1779
1780 /*
1781 * Set up checksum info for this packet.
1782 */
1783 if (status & WRX_ST_IPCS) {
1784 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1785 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1786 if (errors & WRX_ER_IPE)
1787 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1788 }
1789 if (status & WRX_ST_TCPCS) {
1790 /*
1791 * Note: we don't know if this was TCP or UDP,
1792 * so we just set both bits, and expect the
1793 * upper layers to deal.
1794 */
1795 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1796 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1797 if (errors & WRX_ER_TCPE)
1798 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1799 }
1800
1801 ifp->if_ipackets++;
1802
1803 #if NBPFILTER > 0
1804 /* Pass this up to any BPF listeners. */
1805 if (ifp->if_bpf)
1806 bpf_mtap(ifp->if_bpf, m);
1807 #endif /* NBPFILTER > 0 */
1808
1809 /* Pass it on. */
1810 (*ifp->if_input)(ifp, m);
1811 }
1812
1813 /* Update the receive pointer. */
1814 sc->sc_rxptr = i;
1815
1816 DPRINTF(WM_DEBUG_RX,
1817 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1818 }
1819
1820 /*
1821 * wm_linkintr:
1822 *
1823 * Helper; handle link interrupts.
1824 */
1825 void
1826 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1827 {
1828 uint32_t status;
1829
1830 /*
1831 * If we get a link status interrupt on a 1000BASE-T
1832 * device, just fall into the normal MII tick path.
1833 */
1834 if (sc->sc_flags & WM_F_HAS_MII) {
1835 if (icr & ICR_LSC) {
1836 DPRINTF(WM_DEBUG_LINK,
1837 ("%s: LINK: LSC -> mii_tick\n",
1838 sc->sc_dev.dv_xname));
1839 mii_tick(&sc->sc_mii);
1840 } else if (icr & ICR_RXSEQ) {
1841 DPRINTF(WM_DEBUG_LINK,
1842 ("%s: LINK Receive sequence error\n",
1843 sc->sc_dev.dv_xname));
1844 }
1845 return;
1846 }
1847
1848 /*
1849 * If we are now receiving /C/, check for link again in
1850 * a couple of link clock ticks.
1851 */
1852 if (icr & ICR_RXCFG) {
1853 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1854 sc->sc_dev.dv_xname));
1855 sc->sc_tbi_anstate = 2;
1856 }
1857
1858 if (icr & ICR_LSC) {
1859 status = CSR_READ(sc, WMREG_STATUS);
1860 if (status & STATUS_LU) {
1861 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1862 sc->sc_dev.dv_xname,
1863 (status & STATUS_FD) ? "FDX" : "HDX"));
1864 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1865 if (status & STATUS_FD)
1866 sc->sc_tctl |=
1867 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1868 else
1869 sc->sc_tctl |=
1870 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1871 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1872 sc->sc_tbi_linkup = 1;
1873 } else {
1874 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1875 sc->sc_dev.dv_xname));
1876 sc->sc_tbi_linkup = 0;
1877 }
1878 sc->sc_tbi_anstate = 2;
1879 wm_tbi_set_linkled(sc);
1880 } else if (icr & ICR_RXSEQ) {
1881 DPRINTF(WM_DEBUG_LINK,
1882 ("%s: LINK: Receive sequence error\n",
1883 sc->sc_dev.dv_xname));
1884 }
1885 }
1886
1887 /*
1888 * wm_tick:
1889 *
1890 * One second timer, used to check link status, sweep up
1891 * completed transmit jobs, etc.
1892 */
1893 void
1894 wm_tick(void *arg)
1895 {
1896 struct wm_softc *sc = arg;
1897 int s;
1898
1899 s = splnet();
1900
1901 if (sc->sc_flags & WM_F_HAS_MII)
1902 mii_tick(&sc->sc_mii);
1903 else
1904 wm_tbi_check_link(sc);
1905
1906 splx(s);
1907
1908 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1909 }
1910
1911 /*
1912 * wm_reset:
1913 *
1914 * Reset the i82542 chip.
1915 */
1916 void
1917 wm_reset(struct wm_softc *sc)
1918 {
1919 int i;
1920
1921 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1922 delay(10000);
1923
1924 for (i = 0; i < 1000; i++) {
1925 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1926 return;
1927 delay(20);
1928 }
1929
1930 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1931 printf("%s: WARNING: reset failed to complete\n",
1932 sc->sc_dev.dv_xname);
1933 }
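
/*
 * Timing note on the reset above: the loop polls the self-clearing
 * CTRL_RST bit at 20 microsecond intervals, so after the initial 10ms
 * settling delay we wait up to an additional 1000 * 20us = 20ms for
 * the reset to complete before printing the warning.
 */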

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
1951 * of the descriptor so that the headers are aligned, for normal mtu,
1952 * on such platforms. One possibility is that the DMA itself is
1953 * slightly more efficient if the front of the entire packet (instead
1954 * of the front of the headers) is aligned.
1955 *
1956 * Note we must always set align_tweak to 0 if we are using
1957 * jumbo frames.
1958 */
1959 #ifdef __NO_STRICT_ALIGNMENT
1960 sc->sc_align_tweak = 0;
1961 #else
1962 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1963 sc->sc_align_tweak = 0;
1964 else
1965 sc->sc_align_tweak = 2;
1966 #endif /* __NO_STRICT_ALIGNMENT */
1967
1968 /* Cancel any pending I/O. */
1969 wm_stop(ifp, 0);
1970
1971 /* Reset the chip to a known state. */
1972 wm_reset(sc);
1973
1974 /* Initialize the transmit descriptor ring. */
1975 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1976 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1977 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1978 sc->sc_txfree = WM_NTXDESC;
1979 sc->sc_txnext = 0;
1980
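	/*
	 * Invalidate the cached transmit checksum context.  0xffffffff
	 * never matches a real context, so the first packet that wants
	 * checksum offload will load a fresh one.
	 */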
1981 sc->sc_txctx_ipcs = 0xffffffff;
1982 sc->sc_txctx_tucs = 0xffffffff;
1983
1984 if (sc->sc_type < WM_T_82543) {
1985 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1986 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1987 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1988 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1989 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1990 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1991 } else {
1992 CSR_WRITE(sc, WMREG_TBDAH, 0);
1993 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1994 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1995 CSR_WRITE(sc, WMREG_TDH, 0);
1996 CSR_WRITE(sc, WMREG_TDT, 0);
1997 CSR_WRITE(sc, WMREG_TIDV, 128);
1998
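		/*
		 * Descriptor control: PTHRESH, HTHRESH and WTHRESH are the
		 * prefetch, host and write-back thresholds.  An Rx WTHRESH
		 * of 1 asks the chip to write back each used receive
		 * descriptor promptly.
		 */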
1999 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2000 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2001 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2002 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2003 }
2004 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2005 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2006
2007 /* Initialize the transmit job descriptors. */
2008 for (i = 0; i < WM_TXQUEUELEN; i++)
2009 sc->sc_txsoft[i].txs_mbuf = NULL;
2010 sc->sc_txsfree = WM_TXQUEUELEN;
2011 sc->sc_txsnext = 0;
2012 sc->sc_txsdirty = 0;
2013
2014 /*
2015 * Initialize the receive descriptor and receive job
2016 * descriptor rings.
2017 */
2018 if (sc->sc_type < WM_T_82543) {
2019 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2020 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2021 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2022 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2023 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2024 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2025
2026 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2027 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2028 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2029 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2030 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2031 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2032 } else {
2033 CSR_WRITE(sc, WMREG_RDBAH, 0);
2034 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2035 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2036 CSR_WRITE(sc, WMREG_RDH, 0);
2037 CSR_WRITE(sc, WMREG_RDT, 0);
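		/*
		 * Receive interrupt delay timer: wait 28 units (presumably
		 * 1.024us each) before interrupting; RDTR_FPD forces any
		 * partially-filled descriptor to be written back when the
		 * timer expires.
		 */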
2038 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2039 }
2040 for (i = 0; i < WM_NRXDESC; i++) {
2041 rxs = &sc->sc_rxsoft[i];
2042 if (rxs->rxs_mbuf == NULL) {
2043 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2044 printf("%s: unable to allocate or map rx "
2045 "buffer %d, error = %d\n",
2046 sc->sc_dev.dv_xname, i, error);
2047 /*
2048 * XXX Should attempt to run with fewer receive
2049 * XXX buffers instead of just failing.
2050 */
2051 wm_rxdrain(sc);
2052 goto out;
2053 }
2054 } else
2055 WM_INIT_RXDESC(sc, i);
2056 }
2057 sc->sc_rxptr = 0;
2058 sc->sc_rxdiscard = 0;
2059 WM_RXCHAIN_RESET(sc);
2060
2061 /*
2062 * Clear out the VLAN table -- we don't use it (yet).
2063 */
2064 CSR_WRITE(sc, WMREG_VET, 0);
2065 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2066 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2067
2068 /*
2069 * Set up flow-control parameters.
2070 *
2071 * XXX Values could probably stand some tuning.
2072 */
2073 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
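		/*
		 * FCAL/FCAH hold the reserved PAUSE-frame multicast address
		 * (01:80:c2:00:00:01), FCT the MAC control ethertype
		 * (0x8808), and FCTTV the pause time we place in XOFF
		 * frames.
		 */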
2074 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2075 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2076 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2077
2078 if (sc->sc_type < WM_T_82543) {
2079 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2080 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2081 } else {
2082 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2083 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2084 }
2085 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2086 }
2087
2088 #if 0 /* XXXJRT */
2089 /* Deal with VLAN enables. */
2090 if (sc->sc_ethercom.ec_nvlans != 0)
2091 sc->sc_ctrl |= CTRL_VME;
2092 else
2093 #endif /* XXXJRT */
2094 sc->sc_ctrl &= ~CTRL_VME;
2095
2096 /* Write the control registers. */
2097 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2098 #if 0
2099 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2100 #endif
2101
2102 /*
2103 * Set up checksum offload parameters.
2104 */
2105 reg = CSR_READ(sc, WMREG_RXCSUM);
2106 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2107 reg |= RXCSUM_IPOFL;
2108 else
2109 reg &= ~RXCSUM_IPOFL;
2110 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2111 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2112 else {
2113 reg &= ~RXCSUM_TUOFL;
2114 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2115 reg &= ~RXCSUM_IPOFL;
2116 }
2117 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2118
2119 /*
2120 * Set up the interrupt registers.
2121 */
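	/* Mask everything off first (IMC), then enable what we need (IMS). */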
2122 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2123 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2124 ICR_RXO | ICR_RXT0;
2125 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2126 sc->sc_icr |= ICR_RXCFG;
2127 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2128
2129 /* Set up the inter-packet gap. */
2130 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2131
2132 #if 0 /* XXXJRT */
2133 /* Set the VLAN ethernetype. */
2134 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2135 #endif
2136
2137 /*
2138 * Set up the transmit control register; we start out with
2139	 * a collision distance suitable for FDX, but update it when
2140 * we resolve the media type.
2141 */
2142 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2143 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2144 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2145
2146 /* Set the media. */
2147 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2148
2149 /*
2150 * Set up the receive control register; we actually program
2151 * the register when we set the receive filter. Use multicast
2152 * address offset type 0.
2153 *
2154 * Only the i82544 has the ability to strip the incoming
2155 * CRC, so we don't enable that feature.
2156 */
2157 sc->sc_mchash_type = 0;
2158 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2159 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2160
2161	if (MCLBYTES == 2048) {
2162 sc->sc_rctl |= RCTL_2k;
2163 } else {
2164 /*
2165 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2166 * XXX segments, dropping" -- why?
2167 */
2168 #if 0
2169		if (sc->sc_type >= WM_T_82543) {
2170			switch (MCLBYTES) {
2171 case 4096:
2172 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2173 break;
2174 case 8192:
2175 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2176 break;
2177 case 16384:
2178 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2179 break;
2180 default:
2181 panic("wm_init: MCLBYTES %d unsupported",
2182 MCLBYTES);
2183 break;
2184 }
2185		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
2186 #else
2187 panic("wm_init: MCLBYTES > 2048 not supported.");
2188 #endif
2189 }
2190
2191 /* Set the receive filter. */
2192 wm_set_filter(sc);
2193
2194 /* Start the one second link check clock. */
2195 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2196
2197 /* ...all done! */
2198 ifp->if_flags |= IFF_RUNNING;
2199 ifp->if_flags &= ~IFF_OACTIVE;
2200
2201 out:
2202 if (error)
2203 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2204 return (error);
2205 }
2206
2207 /*
2208 * wm_rxdrain:
2209 *
2210 * Drain the receive queue.
2211 */
2212 void
2213 wm_rxdrain(struct wm_softc *sc)
2214 {
2215 struct wm_rxsoft *rxs;
2216 int i;
2217
2218 for (i = 0; i < WM_NRXDESC; i++) {
2219 rxs = &sc->sc_rxsoft[i];
2220 if (rxs->rxs_mbuf != NULL) {
2221 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2222 m_freem(rxs->rxs_mbuf);
2223 rxs->rxs_mbuf = NULL;
2224 }
2225 }
2226 }
2227
2228 /*
2229 * wm_stop: [ifnet interface function]
2230 *
2231 * Stop transmission on the interface.
2232 */
2233 void
2234 wm_stop(struct ifnet *ifp, int disable)
2235 {
2236 struct wm_softc *sc = ifp->if_softc;
2237 struct wm_txsoft *txs;
2238 int i;
2239
2240 /* Stop the one second clock. */
2241 callout_stop(&sc->sc_tick_ch);
2242
2243 if (sc->sc_flags & WM_F_HAS_MII) {
2244 /* Down the MII. */
2245 mii_down(&sc->sc_mii);
2246 }
2247
2248 /* Stop the transmit and receive processes. */
2249 CSR_WRITE(sc, WMREG_TCTL, 0);
2250 CSR_WRITE(sc, WMREG_RCTL, 0);
2251
2252 /* Release any queued transmit buffers. */
2253 for (i = 0; i < WM_TXQUEUELEN; i++) {
2254 txs = &sc->sc_txsoft[i];
2255 if (txs->txs_mbuf != NULL) {
2256 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2257 m_freem(txs->txs_mbuf);
2258 txs->txs_mbuf = NULL;
2259 }
2260 }
2261
2262 if (disable)
2263 wm_rxdrain(sc);
2264
2265 /* Mark the interface as down and cancel the watchdog timer. */
2266 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2267 ifp->if_timer = 0;
2268 }
2269
2270 /*
2271 * wm_read_eeprom:
2272 *
2273 * Read data from the serial EEPROM.
2274 */
2275 void
2276 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2277 {
2278 uint32_t reg;
2279 int i, x, addrbits = 6;
2280
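	/*
	 * The EEPROM is a Microwire ("uwire") part: for each word, raise
	 * chip select, clock the 3-bit READ opcode and the word address
	 * out MSB-first on DI, then clock the 16 data bits back in on DO.
	 */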
2281 for (i = 0; i < wordcnt; i++) {
2282 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2283 reg = CSR_READ(sc, WMREG_EECD);
2284
2285 /* Get number of address bits. */
2286 if (reg & EECD_EE_SIZE)
2287 addrbits = 8;
2288
2289 /* Request EEPROM access. */
2290 reg |= EECD_EE_REQ;
2291 CSR_WRITE(sc, WMREG_EECD, reg);
2292
2293			/* ...and wait for it to be granted. */
2294 for (x = 0; x < 100; x++) {
2295 reg = CSR_READ(sc, WMREG_EECD);
2296 if (reg & EECD_EE_GNT)
2297 break;
2298 delay(5);
2299 }
2300 if ((reg & EECD_EE_GNT) == 0) {
2301 printf("%s: could not acquire EEPROM GNT\n",
2302 sc->sc_dev.dv_xname);
2303 *data = 0xffff;
2304 reg &= ~EECD_EE_REQ;
2305 CSR_WRITE(sc, WMREG_EECD, reg);
2306 continue;
2307 }
2308 } else
2309 reg = 0;
2310
2311 /* Clear SK and DI. */
2312 reg &= ~(EECD_SK | EECD_DI);
2313 CSR_WRITE(sc, WMREG_EECD, reg);
2314
2315 /* Set CHIP SELECT. */
2316 reg |= EECD_CS;
2317 CSR_WRITE(sc, WMREG_EECD, reg);
2318 delay(2);
2319
2320 /* Shift in the READ command. */
2321 for (x = 3; x > 0; x--) {
2322 if (UWIRE_OPC_READ & (1 << (x - 1)))
2323 reg |= EECD_DI;
2324 else
2325 reg &= ~EECD_DI;
2326 CSR_WRITE(sc, WMREG_EECD, reg);
2327 delay(2);
2328 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2329 delay(2);
2330 CSR_WRITE(sc, WMREG_EECD, reg);
2331 delay(2);
2332 }
2333
2334 /* Shift in address. */
2335 for (x = addrbits; x > 0; x--) {
2336 if ((word + i) & (1 << (x - 1)))
2337 reg |= EECD_DI;
2338 else
2339 reg &= ~EECD_DI;
2340 CSR_WRITE(sc, WMREG_EECD, reg);
2341 delay(2);
2342 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2343 delay(2);
2344 CSR_WRITE(sc, WMREG_EECD, reg);
2345 delay(2);
2346 }
2347
2348 /* Shift out the data. */
2349 reg &= ~EECD_DI;
2350 data[i] = 0;
2351 for (x = 16; x > 0; x--) {
2352 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2353 delay(2);
2354 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2355 data[i] |= (1 << (x - 1));
2356 CSR_WRITE(sc, WMREG_EECD, reg);
2357 delay(2);
2358 }
2359
2360 /* Clear CHIP SELECT. */
2361 reg &= ~EECD_CS;
2362 CSR_WRITE(sc, WMREG_EECD, reg);
2363 delay(2);
2364
2365 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2366 /* Release the EEPROM. */
2367 reg &= ~EECD_EE_REQ;
2368 CSR_WRITE(sc, WMREG_EECD, reg);
2369 }
2370 }
2371 }
2372
2373 /*
2374 * wm_add_rxbuf:
2375 *
2376  *	Add a receive buffer to the indicated descriptor.
2377 */
2378 int
2379 wm_add_rxbuf(struct wm_softc *sc, int idx)
2380 {
2381 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2382 struct mbuf *m;
2383 int error;
2384
2385 MGETHDR(m, M_DONTWAIT, MT_DATA);
2386 if (m == NULL)
2387 return (ENOBUFS);
2388
2389 MCLGET(m, M_DONTWAIT);
2390 if ((m->m_flags & M_EXT) == 0) {
2391 m_freem(m);
2392 return (ENOBUFS);
2393 }
2394
2395 if (rxs->rxs_mbuf != NULL)
2396 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2397
2398 rxs->rxs_mbuf = m;
2399
2400 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2401 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2402 BUS_DMA_READ|BUS_DMA_NOWAIT);
2403 if (error) {
2404 printf("%s: unable to load rx DMA map %d, error = %d\n",
2405 sc->sc_dev.dv_xname, idx, error);
2406 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2407 }
2408
2409 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2410 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2411
2412 WM_INIT_RXDESC(sc, idx);
2413
2414 return (0);
2415 }
2416
2417 /*
2418 * wm_set_ral:
2419 *
2420  *	Set an entry in the receive address list.
2421 */
2422 static void
2423 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2424 {
2425 uint32_t ral_lo, ral_hi;
2426
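	/*
	 * Pack the address into the Receive Address Low/High register
	 * pair, low-order bytes first; RAL_AV marks the entry valid.
	 */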
2427 if (enaddr != NULL) {
2428 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2429 (enaddr[3] << 24);
2430 ral_hi = enaddr[4] | (enaddr[5] << 8);
2431 ral_hi |= RAL_AV;
2432 } else {
2433 ral_lo = 0;
2434 ral_hi = 0;
2435 }
2436
2437 if (sc->sc_type >= WM_T_82544) {
2438 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2439 ral_lo);
2440 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2441 ral_hi);
2442 } else {
2443 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2444 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2445 }
2446 }
2447
2448 /*
2449 * wm_mchash:
2450 *
2451 * Compute the hash of the multicast address for the 4096-bit
2452 * multicast filter.
2453 */
2454 static uint32_t
2455 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2456 {
2457 static const int lo_shift[4] = { 4, 3, 2, 0 };
2458 static const int hi_shift[4] = { 4, 5, 6, 8 };
2459 uint32_t hash;
2460
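	/*
	 * The hash is a 12-bit window into the last two bytes of the
	 * address; sc_mchash_type (programmed into the receive control
	 * register via RCTL_MO) selects which window.  The caller uses
	 * bits [11:5] to pick an MTA register and [4:0] the bit within it.
	 */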
2461 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2462 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2463
2464 return (hash & 0xfff);
2465 }
2466
2467 /*
2468 * wm_set_filter:
2469 *
2470 * Set up the receive filter.
2471 */
2472 void
2473 wm_set_filter(struct wm_softc *sc)
2474 {
2475 struct ethercom *ec = &sc->sc_ethercom;
2476 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2477 struct ether_multi *enm;
2478 struct ether_multistep step;
2479 bus_addr_t mta_reg;
2480 uint32_t hash, reg, bit;
2481 int i;
2482
2483 if (sc->sc_type >= WM_T_82544)
2484 mta_reg = WMREG_CORDOVA_MTA;
2485 else
2486 mta_reg = WMREG_MTA;
2487
2488 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2489
2490 if (ifp->if_flags & IFF_BROADCAST)
2491 sc->sc_rctl |= RCTL_BAM;
2492 if (ifp->if_flags & IFF_PROMISC) {
2493 sc->sc_rctl |= RCTL_UPE;
2494 goto allmulti;
2495 }
2496
2497 /*
2498 * Set the station address in the first RAL slot, and
2499 * clear the remaining slots.
2500 */
2501 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2502 for (i = 1; i < WM_RAL_TABSIZE; i++)
2503 wm_set_ral(sc, NULL, i);
2504
2505 /* Clear out the multicast table. */
2506 for (i = 0; i < WM_MC_TABSIZE; i++)
2507 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2508
2509 ETHER_FIRST_MULTI(step, ec, enm);
2510 while (enm != NULL) {
2511 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2512 /*
2513 * We must listen to a range of multicast addresses.
2514 * For now, just accept all multicasts, rather than
2515 * trying to set only those filter bits needed to match
2516 * the range. (At this time, the only use of address
2517 * ranges is for IP multicast routing, for which the
2518 * range is big enough to require all bits set.)
2519 */
2520 goto allmulti;
2521 }
2522
2523 hash = wm_mchash(sc, enm->enm_addrlo);
2524
2525 reg = (hash >> 5) & 0x7f;
2526 bit = hash & 0x1f;
2527
2528 hash = CSR_READ(sc, mta_reg + (reg << 2));
2529 hash |= 1U << bit;
2530
2531 /* XXX Hardware bug?? */
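		/*
		 * XXX As written, (reg & 0xe) can never equal 1, so this
		 * XXX workaround never fires; the errata check was probably
		 * XXX meant to test for an odd register index instead.
		 */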
2532 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2533 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2534 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2535 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2536 } else
2537 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2538
2539 ETHER_NEXT_MULTI(step, enm);
2540 }
2541
2542 ifp->if_flags &= ~IFF_ALLMULTI;
2543 goto setit;
2544
2545 allmulti:
2546 ifp->if_flags |= IFF_ALLMULTI;
2547 sc->sc_rctl |= RCTL_MPE;
2548
2549 setit:
2550 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2551 }
2552
2553 /*
2554 * wm_tbi_mediainit:
2555 *
2556 * Initialize media for use on 1000BASE-X devices.
2557 */
2558 void
2559 wm_tbi_mediainit(struct wm_softc *sc)
2560 {
2561 const char *sep = "";
2562
2563 if (sc->sc_type < WM_T_82543)
2564 sc->sc_tipg = TIPG_WM_DFLT;
2565 else
2566 sc->sc_tipg = TIPG_LG_DFLT;
2567
2568 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2569 wm_tbi_mediastatus);
2570
2571 /*
2572 * SWD Pins:
2573 *
2574 * 0 = Link LED (output)
2575 * 1 = Loss Of Signal (input)
2576 */
2577 sc->sc_ctrl |= CTRL_SWDPIO(0);
2578 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2579
2580 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2581
2582 #define ADD(ss, mm, dd) \
2583 do { \
2584 printf("%s%s", sep, ss); \
2585 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2586 sep = ", "; \
2587 } while (/*CONSTCOND*/0)
2588
2589 printf("%s: ", sc->sc_dev.dv_xname);
2590 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2591 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2592 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2593 printf("\n");
2594
2595 #undef ADD
2596
2597 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2598 }
2599
2600 /*
2601 * wm_tbi_mediastatus: [ifmedia interface function]
2602 *
2603 * Get the current interface media status on a 1000BASE-X device.
2604 */
2605 void
2606 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2607 {
2608 struct wm_softc *sc = ifp->if_softc;
2609
2610 ifmr->ifm_status = IFM_AVALID;
2611 ifmr->ifm_active = IFM_ETHER;
2612
2613 if (sc->sc_tbi_linkup == 0) {
2614 ifmr->ifm_active |= IFM_NONE;
2615 return;
2616 }
2617
2618 ifmr->ifm_status |= IFM_ACTIVE;
2619 ifmr->ifm_active |= IFM_1000_SX;
2620 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2621 ifmr->ifm_active |= IFM_FDX;
2622 }
2623
2624 /*
2625 * wm_tbi_mediachange: [ifmedia interface function]
2626 *
2627 * Set hardware to newly-selected media on a 1000BASE-X device.
2628 */
2629 int
2630 wm_tbi_mediachange(struct ifnet *ifp)
2631 {
2632 struct wm_softc *sc = ifp->if_softc;
2633 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2634 uint32_t status;
2635 int i;
2636
2637 sc->sc_txcw = ife->ifm_data;
2638 if (sc->sc_ctrl & CTRL_RFCE)
2639 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2640 if (sc->sc_ctrl & CTRL_TFCE)
2641 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2642 sc->sc_txcw |= TXCW_ANE;
2643
2644 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2645 delay(10000);
2646
2647 sc->sc_tbi_anstate = 0;
2648
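	/*
	 * SWDPIN 1 is the Loss Of Signal input (see wm_tbi_mediainit);
	 * it reads low when we have signal from the link partner.
	 */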
2649 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2650 /* Have signal; wait for the link to come up. */
2651 for (i = 0; i < 50; i++) {
2652 delay(10000);
2653 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2654 break;
2655 }
2656
2657 status = CSR_READ(sc, WMREG_STATUS);
2658 if (status & STATUS_LU) {
2659 /* Link is up. */
2660 DPRINTF(WM_DEBUG_LINK,
2661 ("%s: LINK: set media -> link up %s\n",
2662 sc->sc_dev.dv_xname,
2663 (status & STATUS_FD) ? "FDX" : "HDX"));
2664 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2665 if (status & STATUS_FD)
2666 sc->sc_tctl |=
2667 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2668 else
2669 sc->sc_tctl |=
2670 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2671 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2672 sc->sc_tbi_linkup = 1;
2673 } else {
2674 /* Link is down. */
2675 DPRINTF(WM_DEBUG_LINK,
2676 ("%s: LINK: set media -> link down\n",
2677 sc->sc_dev.dv_xname));
2678 sc->sc_tbi_linkup = 0;
2679 }
2680 } else {
2681 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2682 sc->sc_dev.dv_xname));
2683 sc->sc_tbi_linkup = 0;
2684 }
2685
2686 wm_tbi_set_linkled(sc);
2687
2688 return (0);
2689 }
2690
2691 /*
2692 * wm_tbi_set_linkled:
2693 *
2694 * Update the link LED on 1000BASE-X devices.
2695 */
2696 void
2697 wm_tbi_set_linkled(struct wm_softc *sc)
2698 {
2699
2700 if (sc->sc_tbi_linkup)
2701 sc->sc_ctrl |= CTRL_SWDPIN(0);
2702 else
2703 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2704
2705 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2706 }
2707
2708 /*
2709 * wm_tbi_check_link:
2710 *
2711 * Check the link on 1000BASE-X devices.
2712 */
2713 void
2714 wm_tbi_check_link(struct wm_softc *sc)
2715 {
2716 uint32_t rxcw, ctrl, status;
2717
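	/*
	 * sc_tbi_anstate is armed (set to 2) by wm_linkintr when we see
	 * /C/ ordered sets or a link state change; counting it down here
	 * delays the real link check until autonegotiation has had a
	 * chance to settle.
	 */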
2718 if (sc->sc_tbi_anstate == 0)
2719 return;
2720 else if (sc->sc_tbi_anstate > 1) {
2721 DPRINTF(WM_DEBUG_LINK,
2722 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2723 sc->sc_tbi_anstate));
2724 sc->sc_tbi_anstate--;
2725 return;
2726 }
2727
2728 sc->sc_tbi_anstate = 0;
2729
2730 rxcw = CSR_READ(sc, WMREG_RXCW);
2731 ctrl = CSR_READ(sc, WMREG_CTRL);
2732 status = CSR_READ(sc, WMREG_STATUS);
2733
2734 if ((status & STATUS_LU) == 0) {
2735 DPRINTF(WM_DEBUG_LINK,
2736 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2737 sc->sc_tbi_linkup = 0;
2738 } else {
2739 DPRINTF(WM_DEBUG_LINK,
2740 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2741 (status & STATUS_FD) ? "FDX" : "HDX"));
2742 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2743 if (status & STATUS_FD)
2744 sc->sc_tctl |=
2745 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2746 else
2747 sc->sc_tctl |=
2748 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2749 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2750 sc->sc_tbi_linkup = 1;
2751 }
2752
2753 wm_tbi_set_linkled(sc);
2754 }
2755
2756 /*
2757 * wm_gmii_reset:
2758 *
2759 * Reset the PHY.
2760 */
2761 void
2762 wm_gmii_reset(struct wm_softc *sc)
2763 {
2764 uint32_t reg;
2765
2766 if (sc->sc_type >= WM_T_82544) {
2767 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2768 delay(20000);
2769
2770 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2771 delay(20000);
2772 } else {
2773 /* The PHY reset pin is active-low. */
2774 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2775 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2776 CTRL_EXT_SWDPIN(4));
2777 reg |= CTRL_EXT_SWDPIO(4);
2778
2779 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2780 delay(10);
2781
2782 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2783 delay(10);
2784
2785 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2786 delay(10);
2787 #if 0
2788 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2789 #endif
2790 }
2791 }
2792
2793 /*
2794 * wm_gmii_mediainit:
2795 *
2796 * Initialize media for use on 1000BASE-T devices.
2797 */
2798 void
2799 wm_gmii_mediainit(struct wm_softc *sc)
2800 {
2801 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2802
2803 /* We have MII. */
2804 sc->sc_flags |= WM_F_HAS_MII;
2805
2806 sc->sc_tipg = TIPG_1000T_DFLT;
2807
2808 /*
2809 * Let the chip set speed/duplex on its own based on
2810 * signals from the PHY.
2811 */
2812 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2813 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2814
2815 /* Initialize our media structures and probe the GMII. */
2816 sc->sc_mii.mii_ifp = ifp;
2817
2818 if (sc->sc_type >= WM_T_82544) {
2819 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2820 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2821 } else {
2822 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2823 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2824 }
2825 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2826
2827 wm_gmii_reset(sc);
2828
2829 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2830 wm_gmii_mediastatus);
2831
2832 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2833 MII_OFFSET_ANY, 0);
2834 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2835 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2836 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2837 } else
2838 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2839 }
2840
2841 /*
2842 * wm_gmii_mediastatus: [ifmedia interface function]
2843 *
2844 * Get the current interface media status on a 1000BASE-T device.
2845 */
2846 void
2847 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2848 {
2849 struct wm_softc *sc = ifp->if_softc;
2850
2851 mii_pollstat(&sc->sc_mii);
2852 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2853 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2854 }
2855
2856 /*
2857 * wm_gmii_mediachange: [ifmedia interface function]
2858 *
2859 * Set hardware to newly-selected media on a 1000BASE-T device.
2860 */
2861 int
2862 wm_gmii_mediachange(struct ifnet *ifp)
2863 {
2864 struct wm_softc *sc = ifp->if_softc;
2865
2866 if (ifp->if_flags & IFF_UP)
2867 mii_mediachg(&sc->sc_mii);
2868 return (0);
2869 }
2870
2871 #define MDI_IO CTRL_SWDPIN(2)
2872 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2873 #define MDI_CLK CTRL_SWDPIN(3)
2874
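/*
 * On the i82543 the MII management interface is bit-banged through the
 * software-definable pins: MDI_IO is the MDIO data line, MDI_CLK the
 * MDC clock, and MDI_DIR the data pin direction (host -> PHY).
 */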
2875 static void
2876 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2877 {
2878 uint32_t i, v;
2879
2880 v = CSR_READ(sc, WMREG_CTRL);
2881 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2882 v |= MDI_DIR | CTRL_SWDPIO(3);
2883
2884 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2885 if (data & i)
2886 v |= MDI_IO;
2887 else
2888 v &= ~MDI_IO;
2889 CSR_WRITE(sc, WMREG_CTRL, v);
2890 delay(10);
2891 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2892 delay(10);
2893 CSR_WRITE(sc, WMREG_CTRL, v);
2894 delay(10);
2895 }
2896 }
2897
2898 static uint32_t
2899 i82543_mii_recvbits(struct wm_softc *sc)
2900 {
2901 uint32_t v, i, data = 0;
2902
2903 v = CSR_READ(sc, WMREG_CTRL);
2904 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2905 v |= CTRL_SWDPIO(3);
2906
2907 CSR_WRITE(sc, WMREG_CTRL, v);
2908 delay(10);
2909 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2910 delay(10);
2911 CSR_WRITE(sc, WMREG_CTRL, v);
2912 delay(10);
2913
2914 for (i = 0; i < 16; i++) {
2915 data <<= 1;
2916 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2917 delay(10);
2918 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2919 data |= 1;
2920 CSR_WRITE(sc, WMREG_CTRL, v);
2921 delay(10);
2922 }
2923
2924 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2925 delay(10);
2926 CSR_WRITE(sc, WMREG_CTRL, v);
2927 delay(10);
2928
2929 return (data);
2930 }
2931
2932 #undef MDI_IO
2933 #undef MDI_DIR
2934 #undef MDI_CLK
2935
2936 /*
2937 * wm_gmii_i82543_readreg: [mii interface function]
2938 *
2939 * Read a PHY register on the GMII (i82543 version).
2940 */
2941 int
2942 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2943 {
2944 struct wm_softc *sc = (void *) self;
2945 int rv;
2946
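	/*
	 * Send 32 bits of preamble, then the 14-bit read frame (start
	 * and read opcodes, PHY and register addresses) MSB-first; the
	 * PHY drives the turnaround and 16 data bits in reply.
	 */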
2947 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2948 i82543_mii_sendbits(sc, reg | (phy << 5) |
2949 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2950 rv = i82543_mii_recvbits(sc) & 0xffff;
2951
2952 DPRINTF(WM_DEBUG_GMII,
2953 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2954 sc->sc_dev.dv_xname, phy, reg, rv));
2955
2956 return (rv);
2957 }
2958
2959 /*
2960 * wm_gmii_i82543_writereg: [mii interface function]
2961 *
2962 * Write a PHY register on the GMII (i82543 version).
2963 */
2964 void
2965 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2966 {
2967 struct wm_softc *sc = (void *) self;
2968
2969 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2970 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2971 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2972 (MII_COMMAND_START << 30), 32);
2973 }
2974
2975 /*
2976 * wm_gmii_i82544_readreg: [mii interface function]
2977 *
2978 * Read a PHY register on the GMII.
2979 */
2980 int
2981 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2982 {
2983 struct wm_softc *sc = (void *) self;
2984 uint32_t mdic;
2985 int i, rv;
2986
2987 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2988 MDIC_REGADD(reg));
2989
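	/* Wait (up to ~1ms) for the MDI read cycle to complete. */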
2990 for (i = 0; i < 100; i++) {
2991 mdic = CSR_READ(sc, WMREG_MDIC);
2992 if (mdic & MDIC_READY)
2993 break;
2994 delay(10);
2995 }
2996
2997 if ((mdic & MDIC_READY) == 0) {
2998 printf("%s: MDIC read timed out: phy %d reg %d\n",
2999 sc->sc_dev.dv_xname, phy, reg);
3000 rv = 0;
3001 } else if (mdic & MDIC_E) {
3002 #if 0 /* This is normal if no PHY is present. */
3003 printf("%s: MDIC read error: phy %d reg %d\n",
3004 sc->sc_dev.dv_xname, phy, reg);
3005 #endif
3006 rv = 0;
3007 } else {
3008 rv = MDIC_DATA(mdic);
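		/*
		 * XXX An all-ones word usually means no PHY is responding
		 * XXX at this address; squash it so the MII layer sees an
		 * XXX empty slot.
		 */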
3009 if (rv == 0xffff)
3010 rv = 0;
3011 }
3012
3013 return (rv);
3014 }
3015
3016 /*
3017 * wm_gmii_i82544_writereg: [mii interface function]
3018 *
3019 * Write a PHY register on the GMII.
3020 */
3021 void
3022 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3023 {
3024 struct wm_softc *sc = (void *) self;
3025 uint32_t mdic;
3026 int i;
3027
3028 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3029 MDIC_REGADD(reg) | MDIC_DATA(val));
3030
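	/* Wait (up to ~1ms) for the MDI write cycle to complete. */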
3031 for (i = 0; i < 100; i++) {
3032 mdic = CSR_READ(sc, WMREG_MDIC);
3033 if (mdic & MDIC_READY)
3034 break;
3035 delay(10);
3036 }
3037
3038 if ((mdic & MDIC_READY) == 0)
3039 printf("%s: MDIC write timed out: phy %d reg %d\n",
3040 sc->sc_dev.dv_xname, phy, reg);
3041 else if (mdic & MDIC_E)
3042 printf("%s: MDIC write error: phy %d reg %d\n",
3043 sc->sc_dev.dv_xname, phy, reg);
3044 }
3045
3046 /*
3047 * wm_gmii_statchg: [mii interface function]
3048 *
3049 * Callback from MII layer when media changes.
3050 */
3051 void
3052 wm_gmii_statchg(struct device *self)
3053 {
3054 struct wm_softc *sc = (void *) self;
3055
3056 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3057
3058 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3059 DPRINTF(WM_DEBUG_LINK,
3060 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3061 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3062 } else {
3063 DPRINTF(WM_DEBUG_LINK,
3064 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3065 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3066 }
3067
3068 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3069 }
3070