/* $NetBSD: if_wm.c,v 1.45 2003/10/20 05:40:03 thorpej Exp $ */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.45 2003/10/20 05:40:03 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring. We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time. We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
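/*
 * The ring sizes are powers of two so that the indices wrap with a
 * simple mask, e.g. WM_NEXTTX(255) == (255 + 1) & 0xff == 0; no
 * modulo or branch is needed in the hot path.
 */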

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
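/*
 * Arithmetic behind the comment above, assuming the usual 2k MCLBYTES
 * and NetBSD's ETHER_MAX_LEN_JUMBO of 9018: a full jumbo frame needs
 * 5 buffers (9018 / 2048 rounds up to 5), and 256 descriptors / 5
 * buffers is about 51 packets, hence "room for 50".
 */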

/*
 * Control structures are DMA'd to the i82542 chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
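/*
 * These expand to compile-time constants, e.g. WM_CDTXOFF(5) is
 * offsetof(struct wm_control_data, wcd_txdescs[5]), the byte offset
 * of Tx descriptor 5 within the DMA-able control-data clump.
 */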

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
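/*
 * sc_rxtailp always points at the m_next slot of the last mbuf in the
 * chain (or at sc_rxhead when the chain is empty), so WM_RXCHAIN_LINK()
 * appends a buffer in O(1) without walking the chain.
 */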

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
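/*
 * These give the bus (DMA) address of descriptor x: the base address
 * of the loaded control-data segment plus the descriptor's byte
 * offset within the clump.
 */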

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
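/*
 * Example of the wrap case handled above: WM_CDTXSYNC(sc, 254, 4, ops)
 * on the 256-entry ring syncs descriptors 254..255 first, then 0..1,
 * since the range wraps even though the memory is contiguous.
 */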

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register. We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)! For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU. On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
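/*
 * With sc_align_tweak == 2 the 14-byte Ethernet header lands at offset
 * 2, so the IP header that follows begins at offset 16, a 4-byte
 * boundary. See wm_init() for how the tweak is chosen.
 */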

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering. Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	eetype = "MicroWire";
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	}
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
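	/*
	 * The EEPROM stores the MAC address as three little-endian
	 * 16-bit words, e.g. a word value of 0x2211 contributes the
	 * bytes 0x11 then 0x22 to consecutive enaddr[] slots.
	 */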

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control. We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (ntohs(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
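		/*
		 * Example: a maximally fragmented packet (WM_NTXSEGS == 16
		 * segments) passes the test below only while sc_txfree is
		 * at least 18, keeping the reserved TDT slot and a possible
		 * context descriptor available.
		 */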
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments. This may
		 * be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet. If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
1516 ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors. Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us. Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface. Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

1965 /*
1966 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
1967 * There is a small but measurable benefit to avoiding the adjustment
1968 * of the descriptor so that the headers are aligned, for normal MTU,
1969 * on such platforms. One possibility is that the DMA itself is
1970 * slightly more efficient if the front of the entire packet (instead
1971 * of the front of the headers) is aligned.
1972 *
1973 * Note we must always set align_tweak to 0 if we are using
1974 * jumbo frames.
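 *
 * (With the 2-byte tweak, the 14-byte Ethernet header ends on a
 * 4-byte boundary, so the IP header that follows it is 32-bit
 * aligned for strict-alignment platforms.)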
1975 */
1976 #ifdef __NO_STRICT_ALIGNMENT
1977 sc->sc_align_tweak = 0;
1978 #else
1979 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1980 sc->sc_align_tweak = 0;
1981 else
1982 sc->sc_align_tweak = 2;
1983 #endif /* __NO_STRICT_ALIGNMENT */
1984
1985 /* Cancel any pending I/O. */
1986 wm_stop(ifp, 0);
1987
1988 /* Reset the chip to a known state. */
1989 wm_reset(sc);
1990
1991 /* Initialize the transmit descriptor ring. */
1992 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1993 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1994 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1995 sc->sc_txfree = WM_NTXDESC;
1996 sc->sc_txnext = 0;
1997
1998 sc->sc_txctx_ipcs = 0xffffffff;
1999 sc->sc_txctx_tucs = 0xffffffff;
2000
2001 if (sc->sc_type < WM_T_82543) {
2002 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2003 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2004 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2005 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2006 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2007 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2008 } else {
2009 CSR_WRITE(sc, WMREG_TBDAH, 0);
2010 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2011 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2012 CSR_WRITE(sc, WMREG_TDH, 0);
2013 CSR_WRITE(sc, WMREG_TDT, 0);
2014 CSR_WRITE(sc, WMREG_TIDV, 128);
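		/*
		 * TIDV delays the transmit descriptor write-back
		 * interrupt so that completions can be batched.
		 */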
2015
2016 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2017 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2018 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2019 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2020 }
2021 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2022 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2023
2024 /* Initialize the transmit job descriptors. */
2025 for (i = 0; i < WM_TXQUEUELEN; i++)
2026 sc->sc_txsoft[i].txs_mbuf = NULL;
2027 sc->sc_txsfree = WM_TXQUEUELEN;
2028 sc->sc_txsnext = 0;
2029 sc->sc_txsdirty = 0;
2030
2031 /*
2032 * Initialize the receive descriptor and receive job
2033 * descriptor rings.
2034 */
2035 if (sc->sc_type < WM_T_82543) {
2036 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2037 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2038 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2039 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2040 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2041 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2042
2043 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2044 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2045 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2046 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2047 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2048 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2049 } else {
2050 CSR_WRITE(sc, WMREG_RDBAH, 0);
2051 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2052 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2053 CSR_WRITE(sc, WMREG_RDH, 0);
2054 CSR_WRITE(sc, WMREG_RDT, 0);
2055 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2056 }
2057 for (i = 0; i < WM_NRXDESC; i++) {
2058 rxs = &sc->sc_rxsoft[i];
2059 if (rxs->rxs_mbuf == NULL) {
2060 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2061 printf("%s: unable to allocate or map rx "
2062 "buffer %d, error = %d\n",
2063 sc->sc_dev.dv_xname, i, error);
2064 /*
2065 * XXX Should attempt to run with fewer receive
2066 * XXX buffers instead of just failing.
2067 */
2068 wm_rxdrain(sc);
2069 goto out;
2070 }
2071 } else
2072 WM_INIT_RXDESC(sc, i);
2073 }
2074 sc->sc_rxptr = 0;
2075 sc->sc_rxdiscard = 0;
2076 WM_RXCHAIN_RESET(sc);
2077
2078 /*
2079 * Clear out the VLAN table -- we don't use it (yet).
2080 */
2081 CSR_WRITE(sc, WMREG_VET, 0);
2082 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2083 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2084
2085 /*
2086 * Set up flow-control parameters.
2087 *
2088 * XXX Values could probably stand some tuning.
2089 */
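	/*
	 * FCAL/FCAH and FCT hold the 802.3x PAUSE frame destination
	 * address (01:80:c2:00:00:01) and ethertype (0x8808).
	 */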
2090 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2091 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2092 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2093 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2094
2095 if (sc->sc_type < WM_T_82543) {
2096 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2097 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2098 } else {
2099 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2100 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2101 }
2102 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2103 }
2104
2105 #if 0 /* XXXJRT */
2106 /* Deal with VLAN enables. */
2107 if (sc->sc_ethercom.ec_nvlans != 0)
2108 sc->sc_ctrl |= CTRL_VME;
2109 else
2110 #endif /* XXXJRT */
2111 sc->sc_ctrl &= ~CTRL_VME;
2112
2113 /* Write the control registers. */
2114 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2115 #if 0
2116 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2117 #endif
2118
2119 /*
2120 * Set up checksum offload parameters.
2121 */
2122 reg = CSR_READ(sc, WMREG_RXCSUM);
2123 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2124 reg |= RXCSUM_IPOFL;
2125 else
2126 reg &= ~RXCSUM_IPOFL;
2127 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2128 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2129 else {
2130 reg &= ~RXCSUM_TUOFL;
2131 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2132 reg &= ~RXCSUM_IPOFL;
2133 }
2134 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2135
2136 /*
2137 * Set up the interrupt registers.
2138 */
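	/* Writing all ones to IMC first masks every interrupt source. */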
2139 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2140 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2141 ICR_RXO | ICR_RXT0;
2142 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2143 sc->sc_icr |= ICR_RXCFG;
2144 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2145
2146 /* Set up the inter-packet gap. */
2147 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2148
2149 #if 0 /* XXXJRT */
2150 /* Set the VLAN ethernetype. */
2151 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2152 #endif
2153
2154 /*
2155 * Set up the transmit control register; we start out with
2156 * a collision distance suitable for FDX, but update it when
2157 * we resolve the media type.
2158 */
2159 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2160 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2161 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2162
2163 /* Set the media. */
2164 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2165
2166 /*
2167 * Set up the receive control register; we actually program
2168 * the register when we set the receive filter. Use multicast
2169 * address offset type 0.
2170 *
2171 * Only the i82544 has the ability to strip the incoming
2172 * CRC, so we don't enable that feature.
2173 */
2174 sc->sc_mchash_type = 0;
2175 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2176 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2177
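	/*
	 * Select the receive buffer size.  RCTL_2k gives the default
	 * 2K buffers; the RCTL_BSEX variants below scale the base
	 * sizes by 16 to reach 4K, 8K and 16K.
	 */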
2178 if (MCLBYTES == 2048) {
2179 sc->sc_rctl |= RCTL_2k;
2180 } else {
2181 /*
2182 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2183 * XXX segments, dropping" -- why?
2184 */
2185 #if 0
2186 if (sc->sc_type >= WM_T_82543) {
2187 switch (MCLBYTES) {
2188 case 4096:
2189 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2190 break;
2191 case 8192:
2192 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2193 break;
2194 case 16384:
2195 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2196 break;
2197 default:
2198 panic("wm_init: MCLBYTES %d unsupported",
2199 MCLBYTES);
2200 break;
2201 }
2202 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2203 #else
2204 panic("wm_init: MCLBYTES > 2048 not supported.");
2205 #endif
2206 }
2207
2208 /* Set the receive filter. */
2209 wm_set_filter(sc);
2210
2211 /* Start the one second link check clock. */
2212 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2213
2214 /* ...all done! */
2215 ifp->if_flags |= IFF_RUNNING;
2216 ifp->if_flags &= ~IFF_OACTIVE;
2217
2218 out:
2219 if (error)
2220 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2221 return (error);
2222 }
2223
2224 /*
2225 * wm_rxdrain:
2226 *
2227 * Drain the receive queue.
2228 */
2229 void
2230 wm_rxdrain(struct wm_softc *sc)
2231 {
2232 struct wm_rxsoft *rxs;
2233 int i;
2234
2235 for (i = 0; i < WM_NRXDESC; i++) {
2236 rxs = &sc->sc_rxsoft[i];
2237 if (rxs->rxs_mbuf != NULL) {
2238 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2239 m_freem(rxs->rxs_mbuf);
2240 rxs->rxs_mbuf = NULL;
2241 }
2242 }
2243 }
2244
2245 /*
2246 * wm_stop: [ifnet interface function]
2247 *
2248 * Stop transmission on the interface.
2249 */
2250 void
2251 wm_stop(struct ifnet *ifp, int disable)
2252 {
2253 struct wm_softc *sc = ifp->if_softc;
2254 struct wm_txsoft *txs;
2255 int i;
2256
2257 /* Stop the one second clock. */
2258 callout_stop(&sc->sc_tick_ch);
2259
2260 if (sc->sc_flags & WM_F_HAS_MII) {
2261 /* Down the MII. */
2262 mii_down(&sc->sc_mii);
2263 }
2264
2265 /* Stop the transmit and receive processes. */
2266 CSR_WRITE(sc, WMREG_TCTL, 0);
2267 CSR_WRITE(sc, WMREG_RCTL, 0);
2268
2269 /* Release any queued transmit buffers. */
2270 for (i = 0; i < WM_TXQUEUELEN; i++) {
2271 txs = &sc->sc_txsoft[i];
2272 if (txs->txs_mbuf != NULL) {
2273 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2274 m_freem(txs->txs_mbuf);
2275 txs->txs_mbuf = NULL;
2276 }
2277 }
2278
2279 if (disable)
2280 wm_rxdrain(sc);
2281
2282 /* Mark the interface as down and cancel the watchdog timer. */
2283 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2284 ifp->if_timer = 0;
2285 }
2286
2287 /*
2288 * wm_acquire_eeprom:
2289 *
2290 * Perform the EEPROM handshake required on some chips.
2291 */
2292 static int
2293 wm_acquire_eeprom(struct wm_softc *sc)
2294 {
2295 uint32_t reg;
2296 int x;
2297
2298 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2299 reg = CSR_READ(sc, WMREG_EECD);
2300
2301 /* Request EEPROM access. */
2302 reg |= EECD_EE_REQ;
2303 CSR_WRITE(sc, WMREG_EECD, reg);
2304
2305 /* ...and wait for it to be granted. */
2306 for (x = 0; x < 100; x++) {
2307 reg = CSR_READ(sc, WMREG_EECD);
2308 if (reg & EECD_EE_GNT)
2309 break;
2310 delay(5);
2311 }
2312 if ((reg & EECD_EE_GNT) == 0) {
2313 printf("%s: could not acquire EEPROM GNT\n",
2314 sc->sc_dev.dv_xname);
2315 reg &= ~EECD_EE_REQ;
2316 CSR_WRITE(sc, WMREG_EECD, reg);
2317 return (1);
2318 }
2319 }
2320
2321 return (0);
2322 }
2323
2324 /*
2325 * wm_release_eeprom:
2326 *
2327 * Release the EEPROM mutex.
2328 */
2329 static void
2330 wm_release_eeprom(struct wm_softc *sc)
2331 {
2332 uint32_t reg;
2333
2334 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2335 reg = CSR_READ(sc, WMREG_EECD);
2336 reg &= ~EECD_EE_REQ;
2337 CSR_WRITE(sc, WMREG_EECD, reg);
2338 }
2339 }
2340
2341 /*
2342 * wm_read_eeprom:
2343 *
2344 * Read data from the serial EEPROM.
2345 */
2346 void
2347 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2348 {
2349 uint32_t reg;
2350 int i, x;
2351
2352 for (i = 0; i < wordcnt; i++) {
2353 if (wm_acquire_eeprom(sc)) {
2354 /* Failed to acquire EEPROM. */
2355 data[i] = 0xffff;
2356 continue;
2357 }
2358
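		/*
		 * The EEPROM speaks the Microwire ("uwire") protocol:
		 * clock out a READ opcode and the word address on DI,
		 * one bit per SK pulse, then clock the 16 data bits
		 * back in on DO, MSB first.
		 */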
2359 reg = CSR_READ(sc, WMREG_EECD);
2360
2361 /* Clear SK and DI. */
2362 reg &= ~(EECD_SK | EECD_DI);
2363 CSR_WRITE(sc, WMREG_EECD, reg);
2364
2365 /* Set CHIP SELECT. */
2366 reg |= EECD_CS;
2367 CSR_WRITE(sc, WMREG_EECD, reg);
2368 delay(2);
2369
2370 /* Shift in the READ command. */
2371 for (x = 3; x > 0; x--) {
2372 if (UWIRE_OPC_READ & (1 << (x - 1)))
2373 reg |= EECD_DI;
2374 else
2375 reg &= ~EECD_DI;
2376 CSR_WRITE(sc, WMREG_EECD, reg);
2377 delay(2);
2378 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2379 delay(2);
2380 CSR_WRITE(sc, WMREG_EECD, reg);
2381 delay(2);
2382 }
2383
2384 /* Shift in address. */
2385 for (x = sc->sc_ee_addrbits; x > 0; x--) {
2386 if ((word + i) & (1 << (x - 1)))
2387 reg |= EECD_DI;
2388 else
2389 reg &= ~EECD_DI;
2390 CSR_WRITE(sc, WMREG_EECD, reg);
2391 delay(2);
2392 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2393 delay(2);
2394 CSR_WRITE(sc, WMREG_EECD, reg);
2395 delay(2);
2396 }
2397
2398 /* Shift out the data. */
2399 reg &= ~EECD_DI;
2400 data[i] = 0;
2401 for (x = 16; x > 0; x--) {
2402 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2403 delay(2);
2404 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2405 data[i] |= (1 << (x - 1));
2406 CSR_WRITE(sc, WMREG_EECD, reg);
2407 delay(2);
2408 }
2409
2410 /* Clear CHIP SELECT. */
2411 reg &= ~EECD_CS;
2412 CSR_WRITE(sc, WMREG_EECD, reg);
2413 delay(2);
2414
2415 wm_release_eeprom(sc);
2416 }
2417 }
2418
2419 /*
2420 * wm_add_rxbuf:
2421 *
2422 * Add a receive buffer to the indicated descriptor.
2423 */
2424 int
2425 wm_add_rxbuf(struct wm_softc *sc, int idx)
2426 {
2427 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2428 struct mbuf *m;
2429 int error;
2430
2431 MGETHDR(m, M_DONTWAIT, MT_DATA);
2432 if (m == NULL)
2433 return (ENOBUFS);
2434
2435 MCLGET(m, M_DONTWAIT);
2436 if ((m->m_flags & M_EXT) == 0) {
2437 m_freem(m);
2438 return (ENOBUFS);
2439 }
2440
2441 if (rxs->rxs_mbuf != NULL)
2442 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2443
2444 rxs->rxs_mbuf = m;
2445
2446 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2447 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2448 BUS_DMA_READ|BUS_DMA_NOWAIT);
2449 if (error) {
2450 printf("%s: unable to load rx DMA map %d, error = %d\n",
2451 sc->sc_dev.dv_xname, idx, error);
2452 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2453 }
2454
2455 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2456 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2457
2458 WM_INIT_RXDESC(sc, idx);
2459
2460 return (0);
2461 }
2462
2463 /*
2464 * wm_set_ral:
2465 *
2466 * Set an entry in the receive address list.
2467 */
2468 static void
2469 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2470 {
2471 uint32_t ral_lo, ral_hi;
2472
2473 if (enaddr != NULL) {
2474 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2475 (enaddr[3] << 24);
2476 ral_hi = enaddr[4] | (enaddr[5] << 8);
2477 ral_hi |= RAL_AV;
2478 } else {
2479 ral_lo = 0;
2480 ral_hi = 0;
2481 }
2482
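	/*
	 * The low register holds the first four octets of the address;
	 * the high register holds the last two plus the Address Valid
	 * bit.
	 */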
2483 if (sc->sc_type >= WM_T_82544) {
2484 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2485 ral_lo);
2486 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2487 ral_hi);
2488 } else {
2489 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2490 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2491 }
2492 }
2493
2494 /*
2495 * wm_mchash:
2496 *
2497 * Compute the hash of the multicast address for the 4096-bit
2498 * multicast filter.
2499 */
2500 static uint32_t
2501 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2502 {
2503 static const int lo_shift[4] = { 4, 3, 2, 0 };
2504 static const int hi_shift[4] = { 4, 5, 6, 8 };
2505 uint32_t hash;
2506
2507 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2508 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2509
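	/*
	 * Illustrative example: the IPv4 all-hosts group 224.0.0.1
	 * maps to MAC address 01:00:5e:00:00:01; with filter type 0
	 * this yields hash = (0x00 >> 4) | (0x01 << 4) = 0x010, i.e.
	 * bit 16 of multicast table word 0 in wm_set_filter().
	 */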
2510 return (hash & 0xfff);
2511 }
2512
2513 /*
2514 * wm_set_filter:
2515 *
2516 * Set up the receive filter.
2517 */
2518 void
2519 wm_set_filter(struct wm_softc *sc)
2520 {
2521 struct ethercom *ec = &sc->sc_ethercom;
2522 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2523 struct ether_multi *enm;
2524 struct ether_multistep step;
2525 bus_addr_t mta_reg;
2526 uint32_t hash, reg, bit;
2527 int i;
2528
2529 if (sc->sc_type >= WM_T_82544)
2530 mta_reg = WMREG_CORDOVA_MTA;
2531 else
2532 mta_reg = WMREG_MTA;
2533
2534 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2535
2536 if (ifp->if_flags & IFF_BROADCAST)
2537 sc->sc_rctl |= RCTL_BAM;
2538 if (ifp->if_flags & IFF_PROMISC) {
2539 sc->sc_rctl |= RCTL_UPE;
2540 goto allmulti;
2541 }
2542
2543 /*
2544 * Set the station address in the first RAL slot, and
2545 * clear the remaining slots.
2546 */
2547 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2548 for (i = 1; i < WM_RAL_TABSIZE; i++)
2549 wm_set_ral(sc, NULL, i);
2550
2551 /* Clear out the multicast table. */
2552 for (i = 0; i < WM_MC_TABSIZE; i++)
2553 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2554
2555 ETHER_FIRST_MULTI(step, ec, enm);
2556 while (enm != NULL) {
2557 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2558 /*
2559 * We must listen to a range of multicast addresses.
2560 * For now, just accept all multicasts, rather than
2561 * trying to set only those filter bits needed to match
2562 * the range. (At this time, the only use of address
2563 * ranges is for IP multicast routing, for which the
2564 * range is big enough to require all bits set.)
2565 */
2566 goto allmulti;
2567 }
2568
2569 hash = wm_mchash(sc, enm->enm_addrlo);
2570
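		/*
		 * The 4096-bit filter is 128 32-bit MTA words: the
		 * upper seven hash bits select the word, the low five
		 * select the bit.
		 */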
2571 reg = (hash >> 5) & 0x7f;
2572 bit = hash & 0x1f;
2573
2574 hash = CSR_READ(sc, mta_reg + (reg << 2));
2575 hash |= 1U << bit;
2576
2577 /*
 * XXX Apparent 82544 erratum: writing an odd-numbered
 * multicast table word can clobber the word before it,
 * so save that word and restore it after the write.
 */
2578 if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
2579 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2580 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2581 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2582 } else
2583 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2584
2585 ETHER_NEXT_MULTI(step, enm);
2586 }
2587
2588 ifp->if_flags &= ~IFF_ALLMULTI;
2589 goto setit;
2590
2591 allmulti:
2592 ifp->if_flags |= IFF_ALLMULTI;
2593 sc->sc_rctl |= RCTL_MPE;
2594
2595 setit:
2596 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2597 }
2598
2599 /*
2600 * wm_tbi_mediainit:
2601 *
2602 * Initialize media for use on 1000BASE-X devices.
2603 */
2604 void
2605 wm_tbi_mediainit(struct wm_softc *sc)
2606 {
2607 const char *sep = "";
2608
2609 if (sc->sc_type < WM_T_82543)
2610 sc->sc_tipg = TIPG_WM_DFLT;
2611 else
2612 sc->sc_tipg = TIPG_LG_DFLT;
2613
2614 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2615 wm_tbi_mediastatus);
2616
2617 /*
2618 * SWD Pins:
2619 *
2620 * 0 = Link LED (output)
2621 * 1 = Loss Of Signal (input)
2622 */
2623 sc->sc_ctrl |= CTRL_SWDPIO(0);
2624 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2625
2626 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2627
2628 #define ADD(ss, mm, dd) \
2629 do { \
2630 printf("%s%s", sep, ss); \
2631 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2632 sep = ", "; \
2633 } while (/*CONSTCOND*/0)
2634
2635 printf("%s: ", sc->sc_dev.dv_xname);
2636 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2637 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2638 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2639 printf("\n");
2640
2641 #undef ADD
2642
2643 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2644 }
2645
2646 /*
2647 * wm_tbi_mediastatus: [ifmedia interface function]
2648 *
2649 * Get the current interface media status on a 1000BASE-X device.
2650 */
2651 void
2652 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2653 {
2654 struct wm_softc *sc = ifp->if_softc;
2655
2656 ifmr->ifm_status = IFM_AVALID;
2657 ifmr->ifm_active = IFM_ETHER;
2658
2659 if (sc->sc_tbi_linkup == 0) {
2660 ifmr->ifm_active |= IFM_NONE;
2661 return;
2662 }
2663
2664 ifmr->ifm_status |= IFM_ACTIVE;
2665 ifmr->ifm_active |= IFM_1000_SX;
2666 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2667 ifmr->ifm_active |= IFM_FDX;
2668 }
2669
2670 /*
2671 * wm_tbi_mediachange: [ifmedia interface function]
2672 *
2673 * Set hardware to newly-selected media on a 1000BASE-X device.
2674 */
2675 int
2676 wm_tbi_mediachange(struct ifnet *ifp)
2677 {
2678 struct wm_softc *sc = ifp->if_softc;
2679 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2680 uint32_t status;
2681 int i;
2682
2683 sc->sc_txcw = ife->ifm_data;
2684 if (sc->sc_ctrl & CTRL_RFCE)
2685 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2686 if (sc->sc_ctrl & CTRL_TFCE)
2687 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2688 sc->sc_txcw |= TXCW_ANE;
2689
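	/*
	 * TXCW is the configuration word transmitted in /C/ ordered
	 * sets; TXCW_ANE enables 802.3z autonegotiation, and the
	 * ANAR_X bits advertise duplex and pause capabilities.
	 */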
2690 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2691 delay(10000);
2692
2693 sc->sc_tbi_anstate = 0;
2694
2695 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2696 /* Have signal; wait for the link to come up. */
2697 for (i = 0; i < 50; i++) {
2698 delay(10000);
2699 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2700 break;
2701 }
2702
2703 status = CSR_READ(sc, WMREG_STATUS);
2704 if (status & STATUS_LU) {
2705 /* Link is up. */
2706 DPRINTF(WM_DEBUG_LINK,
2707 ("%s: LINK: set media -> link up %s\n",
2708 sc->sc_dev.dv_xname,
2709 (status & STATUS_FD) ? "FDX" : "HDX"));
2710 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2711 if (status & STATUS_FD)
2712 sc->sc_tctl |=
2713 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2714 else
2715 sc->sc_tctl |=
2716 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2717 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2718 sc->sc_tbi_linkup = 1;
2719 } else {
2720 /* Link is down. */
2721 DPRINTF(WM_DEBUG_LINK,
2722 ("%s: LINK: set media -> link down\n",
2723 sc->sc_dev.dv_xname));
2724 sc->sc_tbi_linkup = 0;
2725 }
2726 } else {
2727 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2728 sc->sc_dev.dv_xname));
2729 sc->sc_tbi_linkup = 0;
2730 }
2731
2732 wm_tbi_set_linkled(sc);
2733
2734 return (0);
2735 }
2736
2737 /*
2738 * wm_tbi_set_linkled:
2739 *
2740 * Update the link LED on 1000BASE-X devices.
2741 */
2742 void
2743 wm_tbi_set_linkled(struct wm_softc *sc)
2744 {
2745
2746 if (sc->sc_tbi_linkup)
2747 sc->sc_ctrl |= CTRL_SWDPIN(0);
2748 else
2749 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2750
2751 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2752 }
2753
2754 /*
2755 * wm_tbi_check_link:
2756 *
2757 * Check the link on 1000BASE-X devices.
2758 */
2759 void
2760 wm_tbi_check_link(struct wm_softc *sc)
2761 {
2762 uint32_t rxcw, ctrl, status;
2763
2764 if (sc->sc_tbi_anstate == 0)
2765 return;
2766 else if (sc->sc_tbi_anstate > 1) {
2767 DPRINTF(WM_DEBUG_LINK,
2768 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2769 sc->sc_tbi_anstate));
2770 sc->sc_tbi_anstate--;
2771 return;
2772 }
2773
2774 sc->sc_tbi_anstate = 0;
2775
2776 rxcw = CSR_READ(sc, WMREG_RXCW);
2777 ctrl = CSR_READ(sc, WMREG_CTRL);
2778 status = CSR_READ(sc, WMREG_STATUS);
2779
2780 if ((status & STATUS_LU) == 0) {
2781 DPRINTF(WM_DEBUG_LINK,
2782 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2783 sc->sc_tbi_linkup = 0;
2784 } else {
2785 DPRINTF(WM_DEBUG_LINK,
2786 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2787 (status & STATUS_FD) ? "FDX" : "HDX"));
2788 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2789 if (status & STATUS_FD)
2790 sc->sc_tctl |=
2791 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2792 else
2793 sc->sc_tctl |=
2794 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2795 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2796 sc->sc_tbi_linkup = 1;
2797 }
2798
2799 wm_tbi_set_linkled(sc);
2800 }
2801
2802 /*
2803 * wm_gmii_reset:
2804 *
2805 * Reset the PHY.
2806 */
2807 void
2808 wm_gmii_reset(struct wm_softc *sc)
2809 {
2810 uint32_t reg;
2811
2812 if (sc->sc_type >= WM_T_82544) {
2813 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2814 delay(20000);
2815
2816 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2817 delay(20000);
2818 } else {
2819 /* The PHY reset pin is active-low. */
2820 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2821 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2822 CTRL_EXT_SWDPIN(4));
2823 reg |= CTRL_EXT_SWDPIO(4);
2824
2825 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2826 delay(10);
2827
2828 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2829 delay(10);
2830
2831 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2832 delay(10);
2833 #if 0
2834 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2835 #endif
2836 }
2837 }
2838
2839 /*
2840 * wm_gmii_mediainit:
2841 *
2842 * Initialize media for use on 1000BASE-T devices.
2843 */
2844 void
2845 wm_gmii_mediainit(struct wm_softc *sc)
2846 {
2847 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2848
2849 /* We have MII. */
2850 sc->sc_flags |= WM_F_HAS_MII;
2851
2852 sc->sc_tipg = TIPG_1000T_DFLT;
2853
2854 /*
2855 * Let the chip set speed/duplex on its own based on
2856 * signals from the PHY.
2857 */
2858 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2859 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2860
2861 /* Initialize our media structures and probe the GMII. */
2862 sc->sc_mii.mii_ifp = ifp;
2863
2864 if (sc->sc_type >= WM_T_82544) {
2865 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2866 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2867 } else {
2868 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2869 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2870 }
2871 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2872
2873 wm_gmii_reset(sc);
2874
2875 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2876 wm_gmii_mediastatus);
2877
2878 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2879 MII_OFFSET_ANY, 0);
2880 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2881 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2882 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2883 } else
2884 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2885 }
2886
2887 /*
2888 * wm_gmii_mediastatus: [ifmedia interface function]
2889 *
2890 * Get the current interface media status on a 1000BASE-T device.
2891 */
2892 void
2893 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2894 {
2895 struct wm_softc *sc = ifp->if_softc;
2896
2897 mii_pollstat(&sc->sc_mii);
2898 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2899 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2900 }
2901
2902 /*
2903 * wm_gmii_mediachange: [ifmedia interface function]
2904 *
2905 * Set hardware to newly-selected media on a 1000BASE-T device.
2906 */
2907 int
2908 wm_gmii_mediachange(struct ifnet *ifp)
2909 {
2910 struct wm_softc *sc = ifp->if_softc;
2911
2912 if (ifp->if_flags & IFF_UP)
2913 mii_mediachg(&sc->sc_mii);
2914 return (0);
2915 }
2916
2917 #define MDI_IO CTRL_SWDPIN(2)
2918 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2919 #define MDI_CLK CTRL_SWDPIN(3)
2920
2921 static void
2922 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2923 {
2924 uint32_t i, v;
2925
2926 v = CSR_READ(sc, WMREG_CTRL);
2927 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2928 v |= MDI_DIR | CTRL_SWDPIO(3);
2929
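	/*
	 * Present each bit on MDI_IO, MSB first, and latch it into
	 * the PHY with a rising edge on MDI_CLK.
	 */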
2930 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2931 if (data & i)
2932 v |= MDI_IO;
2933 else
2934 v &= ~MDI_IO;
2935 CSR_WRITE(sc, WMREG_CTRL, v);
2936 delay(10);
2937 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2938 delay(10);
2939 CSR_WRITE(sc, WMREG_CTRL, v);
2940 delay(10);
2941 }
2942 }
2943
2944 static uint32_t
2945 i82543_mii_recvbits(struct wm_softc *sc)
2946 {
2947 uint32_t v, i, data = 0;
2948
2949 v = CSR_READ(sc, WMREG_CTRL);
2950 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2951 v |= CTRL_SWDPIO(3);
2952
2953 CSR_WRITE(sc, WMREG_CTRL, v);
2954 delay(10);
2955 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2956 delay(10);
2957 CSR_WRITE(sc, WMREG_CTRL, v);
2958 delay(10);
2959
2960 for (i = 0; i < 16; i++) {
2961 data <<= 1;
2962 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2963 delay(10);
2964 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2965 data |= 1;
2966 CSR_WRITE(sc, WMREG_CTRL, v);
2967 delay(10);
2968 }
2969
2970 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2971 delay(10);
2972 CSR_WRITE(sc, WMREG_CTRL, v);
2973 delay(10);
2974
2975 return (data);
2976 }
2977
2978 #undef MDI_IO
2979 #undef MDI_DIR
2980 #undef MDI_CLK
2981
2982 /*
2983 * wm_gmii_i82543_readreg: [mii interface function]
2984 *
2985 * Read a PHY register on the GMII (i82543 version).
2986 */
2987 int
2988 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2989 {
2990 struct wm_softc *sc = (void *) self;
2991 int rv;
2992
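	/*
	 * Bit-bang an IEEE 802.3 clause 22 MDIO read frame: a 32-bit
	 * preamble of ones, then start (01), read opcode (10), the
	 * 5-bit PHY address and the 5-bit register address, MSB first;
	 * i82543_mii_recvbits() then clocks in the 16 data bits.
	 */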
2993 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2994 i82543_mii_sendbits(sc, reg | (phy << 5) |
2995 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2996 rv = i82543_mii_recvbits(sc) & 0xffff;
2997
2998 DPRINTF(WM_DEBUG_GMII,
2999 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3000 sc->sc_dev.dv_xname, phy, reg, rv));
3001
3002 return (rv);
3003 }
3004
3005 /*
3006 * wm_gmii_i82543_writereg: [mii interface function]
3007 *
3008 * Write a PHY register on the GMII (i82543 version).
3009 */
3010 void
3011 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3012 {
3013 struct wm_softc *sc = (void *) self;
3014
3015 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3016 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3017 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3018 (MII_COMMAND_START << 30), 32);
3019 }
3020
3021 /*
3022 * wm_gmii_i82544_readreg: [mii interface function]
3023 *
3024 * Read a PHY register on the GMII.
3025 */
3026 int
3027 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3028 {
3029 struct wm_softc *sc = (void *) self;
3030 uint32_t mdic;
3031 int i, rv;
3032
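	/*
	 * The i82544 and later serialize the MDIO frame in hardware:
	 * load MDIC with the opcode, PHY and register addresses, then
	 * poll for MDIC_READY.
	 */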
3033 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3034 MDIC_REGADD(reg));
3035
3036 for (i = 0; i < 100; i++) {
3037 mdic = CSR_READ(sc, WMREG_MDIC);
3038 if (mdic & MDIC_READY)
3039 break;
3040 delay(10);
3041 }
3042
3043 if ((mdic & MDIC_READY) == 0) {
3044 printf("%s: MDIC read timed out: phy %d reg %d\n",
3045 sc->sc_dev.dv_xname, phy, reg);
3046 rv = 0;
3047 } else if (mdic & MDIC_E) {
3048 #if 0 /* This is normal if no PHY is present. */
3049 printf("%s: MDIC read error: phy %d reg %d\n",
3050 sc->sc_dev.dv_xname, phy, reg);
3051 #endif
3052 rv = 0;
3053 } else {
3054 rv = MDIC_DATA(mdic);
3055 if (rv == 0xffff)
3056 rv = 0;
3057 }
3058
3059 return (rv);
3060 }
3061
3062 /*
3063 * wm_gmii_i82544_writereg: [mii interface function]
3064 *
3065 * Write a PHY register on the GMII.
3066 */
3067 void
3068 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3069 {
3070 struct wm_softc *sc = (void *) self;
3071 uint32_t mdic;
3072 int i;
3073
3074 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3075 MDIC_REGADD(reg) | MDIC_DATA(val));
3076
3077 for (i = 0; i < 100; i++) {
3078 mdic = CSR_READ(sc, WMREG_MDIC);
3079 if (mdic & MDIC_READY)
3080 break;
3081 delay(10);
3082 }
3083
3084 if ((mdic & MDIC_READY) == 0)
3085 printf("%s: MDIC write timed out: phy %d reg %d\n",
3086 sc->sc_dev.dv_xname, phy, reg);
3087 else if (mdic & MDIC_E)
3088 printf("%s: MDIC write error: phy %d reg %d\n",
3089 sc->sc_dev.dv_xname, phy, reg);
3090 }
3091
3092 /*
3093 * wm_gmii_statchg: [mii interface function]
3094 *
3095 * Callback from MII layer when media changes.
3096 */
3097 void
3098 wm_gmii_statchg(struct device *self)
3099 {
3100 struct wm_softc *sc = (void *) self;
3101
3102 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3103
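	/*
	 * Half-duplex gigabit uses a larger collision distance than
	 * full duplex to account for carrier extension.
	 */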
3104 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3105 DPRINTF(WM_DEBUG_LINK,
3106 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3107 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3108 } else {
3109 DPRINTF(WM_DEBUG_LINK,
3110 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3111 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3112 }
3113
3114 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3115 }
3116