/*	$NetBSD: if_wm.c,v 1.41 2003/09/10 04:02:17 tls Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.41 2003/09/10 04:02:17 tls Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
 * There is a small but measurable benefit to avoiding the adjustment
 * of the descriptor so that the headers are aligned, for normal mtu,
 * on such platforms.  One possibility is that the DMA itself is
 * slightly more efficient if the front of the entire packet (instead
 * of the front of the headers) is aligned.
 */

#ifdef __NO_STRICT_ALIGNMENT
int	wm_align_tweak = 0;
#else
int	wm_align_tweak = 2;
#endif

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
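
/*
 * Note: the "next" macros above rely on WM_NTXDESC and WM_TXQUEUELEN
 * being powers of two, so "(x + 1) & (size - 1)" wraps a ring index
 * without a division; e.g. (255 + 1) & 255 == 0.  WM_TXQUEUE_GC is the
 * low-water mark of free Tx jobs below which wm_start() calls
 * wm_txintr() to reclaim completed jobs before queueing more.
 */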

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
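
/*
 * The Rx chain macros implement the usual tail-pointer idiom for O(1)
 * append: sc_rxtailp always points at the m_next field (or at sc_rxhead
 * when the chain is empty) through which the next fragment should be
 * stored, so linking a buffer never requires walking the chain.
 * WM_RXCHAIN_RESET() re-aims the tail pointer at sc_rxhead and
 * NULL-terminates the (now empty) chain.
 */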

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
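
/*
 * Example: with WM_NTXDESC == 256, WM_CDTXSYNC(sc, 254, 4, ops) first
 * syncs descriptors 254-255, then descriptors 0-1; a sync that wraps
 * the ring is split into two contiguous bus_dmamap_sync() calls.
 */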

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set wm_align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + wm_align_tweak;		\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		wm_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
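
/*
 * The CSR_WRITE of the receive descriptor tail (RDT) at the end of
 * WM_INIT_RXDESC hands the freshly initialized descriptor back to the
 * chip: the hardware fills descriptors up to, but not including, the
 * one RDT points at, so advancing the tail to x as each buffer is
 * replenished keeps the ring full from the chip's point of view.
 */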

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}
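
	/*
	 * Note: the product table maps the bare 82542 to WM_T_82542_2_1;
	 * the PCI revision ID read above is what distinguishes the 2.0
	 * silicon (rev < 3) from 2.1 and later.
	 */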

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
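
	/*
	 * Each 16-bit EEPROM word carries two bytes of the MAC address,
	 * low-order byte first, so the unpacking above yields the
	 * address in transmission order.
	 */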

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound.
	 * Only on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}
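
	/*
	 * The context descriptor occupies a slot in the same ring as
	 * the data descriptors, which is why it bumps sc_txnext and
	 * txs_ndesc; caching ipcs/tucs above lets back-to-back packets
	 * with identical checksum layouts skip this extra descriptor.
	 */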

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
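
	/*
	 * The "txfree <= 2" test mirrors the reservation made above:
	 * one descriptor is kept back for TDT semantics and one for a
	 * possible checksum context reload, so with two or fewer free
	 * descriptors no further packet can be queued.
	 */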

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}
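
		/*
		 * Note: the PREREAD sync in the break path above hands
		 * the still-pending descriptor back to DMA before we
		 * stop scanning, so the next sweep re-reads it fresh.
		 */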

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

#ifndef __NO_STRICT_ALIGNMENT
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) >
	    (MCLBYTES - 2))
		wm_align_tweak = 0;
	else
		wm_align_tweak = 2;
#else
	wm_align_tweak = 0;
#endif

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		/*
		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many
		 * XXX DMA segments, dropping" -- why?
		 */
#if 0
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
			case 4096:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
				break;
			case 8192:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
				break;
			case 16384:
				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
				break;
			default:
				panic("wm_init: MCLBYTES %d unsupported",
				    MCLBYTES);
				break;
			}
		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
#else
		panic("wm_init: MCLBYTES > 2048 not supported.");
#endif
	}

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop: [ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		wm_rxdrain(sc);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x, addrbits = 6;

	for (i = 0; i < wordcnt; i++) {
		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			reg = CSR_READ(sc, WMREG_EECD);

			/* Get number of address bits. */
			if (reg & EECD_EE_SIZE)
				addrbits = 8;

			/* Request EEPROM access. */
			reg |= EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);

			/* ...and wait for it to be granted. */
			for (x = 0; x < 100; x++) {
				reg = CSR_READ(sc, WMREG_EECD);
				if (reg & EECD_EE_GNT)
					break;
				delay(5);
			}
			if ((reg & EECD_EE_GNT) == 0) {
				printf("%s: could not acquire EEPROM GNT\n",
				    sc->sc_dev.dv_xname);
				*data = 0xffff;
				reg &= ~EECD_EE_REQ;
				CSR_WRITE(sc, WMREG_EECD, reg);
				continue;
			}
		} else
			reg = 0;

		/* Clear SK and DI. */
		reg &= ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		for (x = 3; x > 0; x--) {
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address. */
		for (x = addrbits; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift out the data. */
		reg &= ~EECD_DI;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		reg &= ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			/* Release the EEPROM. */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
		}
	}
}
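
/*
 * Illustration only: a typical caller fetches the three 16-bit words
 * that hold the station address.  The word offset 0 used here is an
 * assumption for the sketch, not a constant taken from if_wmreg.h.
 */
#if 0
	uint16_t myea[3];

	wm_read_eeprom(sc, 0, 3, myea);
	/* Each word carries two address octets, low octet first. */
#endif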

/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}

/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}
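
/*
 * Worked example of the packing above: for the station address
 * 00:a0:c9:01:02:03,
 *
 *	ral_lo = 0x01c9a000	(octets 0..3, octet 0 in bits 7:0)
 *	ral_hi = 0x00000302 | RAL_AV
 *
 * i.e. the address occupies the two registers in transmission order,
 * and RAL_AV marks the entry as valid.
 */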

/*
 * wm_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}
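
/*
 * Worked example (hash type 0, which this driver uses): for the IPv4
 * all-hosts group address 01:00:5e:00:00:01,
 *
 *	hash = (0x00 >> 4) | (0x01 << 8) = 0x100
 *
 * so wm_set_filter() below sets bit (0x100 & 0x1f) = 0 of MTA word
 * (0x100 >> 5) & 0x7f = 8.
 */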

/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(ss, mm, dd)							\
do {									\
	printf("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_tbi_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
}

/*
 * wm_tbi_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}

/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}

/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * wm_gmii_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
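
/*
 * For reference: the bits banged by the routines above form an IEEE
 * 802.3 clause 22 management frame -- a 32-bit preamble of ones, a
 * start delimiter (01), a 2-bit opcode (10 = read, 01 = write), the
 * 5-bit PHY address, the 5-bit register address, a 2-bit turnaround,
 * and 16 bits of data.  wm_gmii_i82543_readreg() below sends the 14
 * frame bits through the register address and then lets
 * i82543_mii_recvbits() clock the turnaround and data back in.
 */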

/*
 * wm_gmii_i82543_readreg: [mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

/*
 * wm_gmii_i82543_writereg: [mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg: [mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

/*
 * wm_gmii_i82544_writereg: [mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
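
/*
 * Illustration only: the MII layer normally performs these accesses
 * itself, but a direct probe of a PHY's identifier registers (from
 * <dev/mii/mii.h>) through the MDIC interface could look like this;
 * the PHY address of 1 is an assumption for the sketch.
 */
#if 0
	int id1, id2;

	id1 = wm_gmii_i82544_readreg(&sc->sc_dev, 1, MII_PHYIDR1);
	id2 = wm_gmii_i82544_readreg(&sc->sc_dev, 1, MII_PHYIDR2);
#endif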

/*
 * wm_gmii_statchg: [mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;

	sc->sc_tctl &= ~TCTL_COLD(0x3ff);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
}
