/*	$NetBSD: if_wm.c,v 1.51 2003/10/20 22:52:19 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.51 2003/10/20 22:52:19 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size. Due to errata, we can only have
 * 256 hardware descriptors in the ring. We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time. We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
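
/*
 * Note that because WM_NTXDESC and WM_TXQUEUELEN are powers of two,
 * the ring and job-queue indices can wrap with a cheap mask rather
 * than a modulus, e.g. WM_NEXTTX(WM_NTXDESC - 1) == 0.
 */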

/*
 * Receive descriptor list size. We have one Rx buffer for normal
 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
 * packet. We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip. We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
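
/*
 * Since both descriptor rings live in this one clump, WM_CDTXOFF() and
 * WM_CDRXOFF() yield byte offsets from the start of the clump; the
 * WM_CDTXADDR()/WM_CDRXADDR() macros below add them to the clump's
 * single DMA segment address to get the bus address of a descriptor.
 */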

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS];	/* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* discarding rest of current packet */
	int sc_rxlen;			/* bytes accumulated in current chain */
	struct mbuf *sc_rxhead;		/* head of current Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* tail of current Rx mbuf chain */
	struct mbuf **sc_rxtailp;	/* where to link the next Rx mbuf */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
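
/*
 * The Rx chain macros use the classic tail-pointer idiom: sc_rxtailp
 * always points at the m_next field of the last mbuf in the chain
 * (or at sc_rxhead when the chain is empty), so each buffer of a
 * multi-buffer packet is appended in constant time.
 */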

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
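
/*
 * Example of the wrap-around case in WM_CDTXSYNC(): syncing 4
 * descriptors starting at index 254 issues one bus_dmamap_sync()
 * for descriptors 254-255 and a second one for descriptors 0-1.
 */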

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register. We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)! For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU. On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
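
/*
 * Note that WM_INIT_RXDESC() finishes by writing the new index to the
 * RDT (receive descriptor tail) register, handing the freshly
 * initialized descriptor back to the chip.
 */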

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};
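
/*
 * The all-zero entry above terminates the table; wm_lookup() below
 * stops at the first entry whose wmp_name is NULL.
 */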

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering. Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	eetype = "MicroWire";
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	}
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control. We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound. Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}
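
/*
 * Note that wm_tx_cksum() caches the most recently loaded checksum
 * context in sc_txctx_ipcs/sc_txctx_tucs (invalidated to 0xffffffff
 * in wm_init()), so a run of packets with identical header layouts
 * needs only a single context descriptor.
 */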

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map. If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources. For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet. Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet. We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments. This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet. If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
1533 ("%s: TX: got TDXW interrupt\n",
1534 sc->sc_dev.dv_xname));
1535 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1536 }
1537 #endif
1538 wm_txintr(sc);
1539
1540 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1541 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1542 wm_linkintr(sc, icr);
1543 }
1544
1545 if (icr & ICR_RXO) {
1546 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1547 wantinit = 1;
1548 }
1549 }
1550
1551 if (handled) {
1552 if (wantinit)
1553 wm_init(ifp);
1554
1555 /* Try to get more packets going. */
1556 wm_start(ifp);
1557 }
1558
1559 return (handled);
1560 }
1561
1562 /*
1563 * wm_txintr:
1564 *
1565 * Helper; handle transmit interrupts.
1566 */
1567 static void
1568 wm_txintr(struct wm_softc *sc)
1569 {
1570 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1571 struct wm_txsoft *txs;
1572 uint8_t status;
1573 int i;
1574
1575 ifp->if_flags &= ~IFF_OACTIVE;
1576
1577 /*
1578 * Go through the Tx list and free mbufs for those
1579 * frames which have been transmitted.
1580 */
1581 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1582 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1583 txs = &sc->sc_txsoft[i];
1584
1585 DPRINTF(WM_DEBUG_TX,
1586 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1587
1588 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1589 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1590
1591 status = le32toh(sc->sc_txdescs[
1592 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1593 if ((status & WTX_ST_DD) == 0) {
1594 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1595 BUS_DMASYNC_PREREAD);
1596 break;
1597 }
1598
1599 DPRINTF(WM_DEBUG_TX,
1600 ("%s: TX: job %d done: descs %d..%d\n",
1601 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1602 txs->txs_lastdesc));
1603
1604 /*
1605 * XXX We should probably be using the statistics
1606 * XXX registers, but I don't know if they exist
1607 * XXX on chips before the i82544.
1608 */
1609
1610 #ifdef WM_EVENT_COUNTERS
1611 if (status & WTX_ST_TU)
1612 WM_EVCNT_INCR(&sc->sc_ev_tu);
1613 #endif /* WM_EVENT_COUNTERS */
1614
1615 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1616 ifp->if_oerrors++;
1617 if (status & WTX_ST_LC)
1618 printf("%s: late collision\n",
1619 sc->sc_dev.dv_xname);
1620 else if (status & WTX_ST_EC) {
1621 ifp->if_collisions += 16;
1622 printf("%s: excessive collisions\n",
1623 sc->sc_dev.dv_xname);
1624 }
1625 } else
1626 ifp->if_opackets++;
1627
1628 sc->sc_txfree += txs->txs_ndesc;
1629 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1630 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1631 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1632 m_freem(txs->txs_mbuf);
1633 txs->txs_mbuf = NULL;
1634 }
1635
1636 /* Update the dirty transmit buffer pointer. */
1637 sc->sc_txsdirty = i;
1638 DPRINTF(WM_DEBUG_TX,
1639 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1640
1641 /*
1642 * If there are no more pending transmissions, cancel the watchdog
1643 * timer.
1644 */
1645 if (sc->sc_txsfree == WM_TXQUEUELEN)
1646 ifp->if_timer = 0;
1647 }
1648
1649 /*
1650 * wm_rxintr:
1651 *
1652 * Helper; handle receive interrupts.
1653 */
1654 static void
1655 wm_rxintr(struct wm_softc *sc)
1656 {
1657 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1658 struct wm_rxsoft *rxs;
1659 struct mbuf *m;
1660 int i, len;
1661 uint8_t status, errors;
1662
1663 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1664 rxs = &sc->sc_rxsoft[i];
1665
1666 DPRINTF(WM_DEBUG_RX,
1667 ("%s: RX: checking descriptor %d\n",
1668 sc->sc_dev.dv_xname, i));
1669
1670 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1671
1672 status = sc->sc_rxdescs[i].wrx_status;
1673 errors = sc->sc_rxdescs[i].wrx_errors;
1674 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1675
1676 if ((status & WRX_ST_DD) == 0) {
1677 /*
1678 * We have processed all of the receive descriptors.
1679 */
1680 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1681 break;
1682 }
1683
1684 if (__predict_false(sc->sc_rxdiscard)) {
1685 DPRINTF(WM_DEBUG_RX,
1686 ("%s: RX: discarding contents of descriptor %d\n",
1687 sc->sc_dev.dv_xname, i));
1688 WM_INIT_RXDESC(sc, i);
1689 if (status & WRX_ST_EOP) {
1690 /* Reset our state. */
1691 DPRINTF(WM_DEBUG_RX,
1692 ("%s: RX: resetting rxdiscard -> 0\n",
1693 sc->sc_dev.dv_xname));
1694 sc->sc_rxdiscard = 0;
1695 }
1696 continue;
1697 }
1698
1699 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1700 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1701
1702 m = rxs->rxs_mbuf;
1703
1704 /*
1705 * Add a new receive buffer to the ring.
1706 */
1707 if (wm_add_rxbuf(sc, i) != 0) {
1708 /*
1709 * Failed, throw away what we've done so
1710 * far, and discard the rest of the packet.
1711 */
1712 ifp->if_ierrors++;
1713 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1714 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1715 WM_INIT_RXDESC(sc, i);
1716 if ((status & WRX_ST_EOP) == 0)
1717 sc->sc_rxdiscard = 1;
1718 if (sc->sc_rxhead != NULL)
1719 m_freem(sc->sc_rxhead);
1720 WM_RXCHAIN_RESET(sc);
1721 DPRINTF(WM_DEBUG_RX,
1722 ("%s: RX: Rx buffer allocation failed, "
1723 "dropping packet%s\n", sc->sc_dev.dv_xname,
1724 sc->sc_rxdiscard ? " (discard)" : ""));
1725 continue;
1726 }
1727
1728 WM_RXCHAIN_LINK(sc, m);
1729
1730 m->m_len = len;
1731
1732 DPRINTF(WM_DEBUG_RX,
1733 ("%s: RX: buffer at %p len %d\n",
1734 sc->sc_dev.dv_xname, m->m_data, len));
1735
1736 /*
1737 * If this is not the end of the packet, keep
1738 * looking.
1739 */
1740 if ((status & WRX_ST_EOP) == 0) {
1741 sc->sc_rxlen += len;
1742 DPRINTF(WM_DEBUG_RX,
1743 ("%s: RX: not yet EOP, rxlen -> %d\n",
1744 sc->sc_dev.dv_xname, sc->sc_rxlen));
1745 continue;
1746 }
1747
1748 /*
1749 * Okay, we have the entire packet now...
1750 */
1751 *sc->sc_rxtailp = NULL;
1752 m = sc->sc_rxhead;
1753 len += sc->sc_rxlen;
1754
1755 WM_RXCHAIN_RESET(sc);
1756
1757 DPRINTF(WM_DEBUG_RX,
1758 ("%s: RX: have entire packet, len -> %d\n",
1759 sc->sc_dev.dv_xname, len));
1760
1761 /*
1762 * If an error occurred, update stats and drop the packet.
1763 */
1764 if (errors &
1765 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1766 ifp->if_ierrors++;
1767 if (errors & WRX_ER_SE)
1768 printf("%s: symbol error\n",
1769 sc->sc_dev.dv_xname);
1770 else if (errors & WRX_ER_SEQ)
1771 printf("%s: receive sequence error\n",
1772 sc->sc_dev.dv_xname);
1773 else if (errors & WRX_ER_CE)
1774 printf("%s: CRC error\n",
1775 sc->sc_dev.dv_xname);
1776 m_freem(m);
1777 continue;
1778 }
1779
1780 /*
1781 * No errors. Receive the packet.
1782 *
1783 * Note, we have configured the chip to include the
1784 * CRC with every packet.
1785 */
1786 m->m_flags |= M_HASFCS;
1787 m->m_pkthdr.rcvif = ifp;
1788 m->m_pkthdr.len = len;
1789
1790 #if 0 /* XXXJRT */
1791 /*
1792 * If VLANs are enabled, VLAN packets have been unwrapped
1793 * for us. Associate the tag with the packet.
1794 */
1795 if (sc->sc_ethercom.ec_nvlans != 0 &&
1796 (status & WRX_ST_VP) != 0) {
1797 struct m_tag *vtag;
1798
1799 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1800 M_NOWAIT);
1801 if (vtag == NULL) {
1802 ifp->if_ierrors++;
1803 printf("%s: unable to allocate VLAN tag\n",
1804 sc->sc_dev.dv_xname);
1805 m_freem(m);
1806 continue;
1807 }
1808
1809 *(u_int *)(vtag + 1) =
1810 le16toh(sc->sc_rxdescs[i].wrx_special);
1811 }
1812 #endif /* XXXJRT */
1813
1814 /*
1815 * Set up checksum info for this packet.
1816 */
1817 if (status & WRX_ST_IPCS) {
1818 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1819 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1820 if (errors & WRX_ER_IPE)
1821 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1822 }
1823 if (status & WRX_ST_TCPCS) {
1824 /*
1825 * Note: we don't know if this was TCP or UDP,
1826 * so we just set both bits, and expect the
1827 * upper layers to deal.
1828 */
1829 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1830 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1831 if (errors & WRX_ER_TCPE)
1832 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1833 }
1834
1835 ifp->if_ipackets++;
1836
1837 #if NBPFILTER > 0
1838 /* Pass this up to any BPF listeners. */
1839 if (ifp->if_bpf)
1840 bpf_mtap(ifp->if_bpf, m);
1841 #endif /* NBPFILTER > 0 */
1842
1843 /* Pass it on. */
1844 (*ifp->if_input)(ifp, m);
1845 }
1846
1847 /* Update the receive pointer. */
1848 sc->sc_rxptr = i;
1849
1850 DPRINTF(WM_DEBUG_RX,
1851 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1852 }
1853
1854 /*
1855 * wm_linkintr:
1856 *
1857 * Helper; handle link interrupts.
1858 */
1859 static void
1860 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1861 {
1862 uint32_t status;
1863
1864 /*
1865 * If we get a link status interrupt on a 1000BASE-T
1866 * device, just fall into the normal MII tick path.
1867 */
1868 if (sc->sc_flags & WM_F_HAS_MII) {
1869 if (icr & ICR_LSC) {
1870 DPRINTF(WM_DEBUG_LINK,
1871 ("%s: LINK: LSC -> mii_tick\n",
1872 sc->sc_dev.dv_xname));
1873 mii_tick(&sc->sc_mii);
1874 } else if (icr & ICR_RXSEQ) {
1875 DPRINTF(WM_DEBUG_LINK,
1876 ("%s: LINK Receive sequence error\n",
1877 sc->sc_dev.dv_xname));
1878 }
1879 return;
1880 }
1881
1882 /*
1883 * If we are now receiving /C/, check for link again in
1884 * a couple of link clock ticks.
1885 */
1886 if (icr & ICR_RXCFG) {
1887 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1888 sc->sc_dev.dv_xname));
1889 sc->sc_tbi_anstate = 2;
1890 }
1891
1892 if (icr & ICR_LSC) {
1893 status = CSR_READ(sc, WMREG_STATUS);
1894 if (status & STATUS_LU) {
1895 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1896 sc->sc_dev.dv_xname,
1897 (status & STATUS_FD) ? "FDX" : "HDX"));
1898 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1899 if (status & STATUS_FD)
1900 sc->sc_tctl |=
1901 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1902 else
1903 sc->sc_tctl |=
1904 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1905 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1906 sc->sc_tbi_linkup = 1;
1907 } else {
1908 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1909 sc->sc_dev.dv_xname));
1910 sc->sc_tbi_linkup = 0;
1911 }
1912 sc->sc_tbi_anstate = 2;
1913 wm_tbi_set_linkled(sc);
1914 } else if (icr & ICR_RXSEQ) {
1915 DPRINTF(WM_DEBUG_LINK,
1916 ("%s: LINK: Receive sequence error\n",
1917 sc->sc_dev.dv_xname));
1918 }
1919 }
1920
1921 /*
1922 * wm_tick:
1923 *
1924 * One second timer, used to check link status, sweep up
1925 * completed transmit jobs, etc.
1926 */
1927 static void
1928 wm_tick(void *arg)
1929 {
1930 struct wm_softc *sc = arg;
1931 int s;
1932
1933 s = splnet();
1934
1935 if (sc->sc_flags & WM_F_HAS_MII)
1936 mii_tick(&sc->sc_mii);
1937 else
1938 wm_tbi_check_link(sc);
1939
1940 splx(s);
1941
1942 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1943 }
1944
1945 /*
1946 * wm_reset:
1947 *
1948  *	Reset the i8254x chip.
1949 */
1950 static void
1951 wm_reset(struct wm_softc *sc)
1952 {
1953 int i;
1954
1955 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1956 delay(10000);
1957
1958 for (i = 0; i < 1000; i++) {
1959 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1960 return;
1961 delay(20);
1962 }
1963
1964 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1965 printf("%s: WARNING: reset failed to complete\n",
1966 sc->sc_dev.dv_xname);
1967 }
1968
1969 /*
1970 * wm_init: [ifnet interface function]
1971 *
1972 * Initialize the interface. Must be called at splnet().
1973 */
1974 static int
1975 wm_init(struct ifnet *ifp)
1976 {
1977 struct wm_softc *sc = ifp->if_softc;
1978 struct wm_rxsoft *rxs;
1979 int i, error = 0;
1980 uint32_t reg;
1981
1982 /*
1983 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
1984 	 * On such platforms there is a small but measurable benefit to
1985 	 * skipping the adjustment that would align the headers, at least
1986 	 * for a normal MTU.  One possibility is that the DMA itself is
1987 	 * slightly more efficient if the front of the entire packet
1988 	 * (instead of the front of the headers) is aligned.
1989 *
1990 * Note we must always set align_tweak to 0 if we are using
1991 * jumbo frames.
1992 */
1993 #ifdef __NO_STRICT_ALIGNMENT
1994 sc->sc_align_tweak = 0;
1995 #else
1996 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1997 sc->sc_align_tweak = 0;
1998 else
1999 sc->sc_align_tweak = 2;
2000 #endif /* __NO_STRICT_ALIGNMENT */
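
	/*
	 * Sketch of where the tweak takes effect (a paraphrase; the
	 * real statement lives in WM_INIT_RXDESC): the mbuf data
	 * pointer and the DMA address handed to the chip are both
	 * advanced by sc_align_tweak, so the 14-byte Ethernet header
	 * ends on a 4-byte boundary and the IP header lands aligned:
	 *
	 *	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
	 */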
2001
2002 /* Cancel any pending I/O. */
2003 wm_stop(ifp, 0);
2004
2005 /* Reset the chip to a known state. */
2006 wm_reset(sc);
2007
2008 /* Initialize the transmit descriptor ring. */
2009 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2010 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2011 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2012 sc->sc_txfree = WM_NTXDESC;
2013 sc->sc_txnext = 0;
2014
2015 sc->sc_txctx_ipcs = 0xffffffff;
2016 sc->sc_txctx_tucs = 0xffffffff;
2017
2018 if (sc->sc_type < WM_T_82543) {
2019 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2020 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2021 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2022 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2023 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2024 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2025 } else {
2026 CSR_WRITE(sc, WMREG_TBDAH, 0);
2027 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2028 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2029 CSR_WRITE(sc, WMREG_TDH, 0);
2030 CSR_WRITE(sc, WMREG_TDT, 0);
2031 CSR_WRITE(sc, WMREG_TIDV, 128);
2032
2033 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2034 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2035 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2036 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2037 }
2038 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2039 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2040
2041 /* Initialize the transmit job descriptors. */
2042 for (i = 0; i < WM_TXQUEUELEN; i++)
2043 sc->sc_txsoft[i].txs_mbuf = NULL;
2044 sc->sc_txsfree = WM_TXQUEUELEN;
2045 sc->sc_txsnext = 0;
2046 sc->sc_txsdirty = 0;
2047
2048 /*
2049 * Initialize the receive descriptor and receive job
2050 * descriptor rings.
2051 */
2052 if (sc->sc_type < WM_T_82543) {
2053 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2054 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2055 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2056 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2057 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2058 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2059
2060 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2061 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2062 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2063 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2064 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2065 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2066 } else {
2067 CSR_WRITE(sc, WMREG_RDBAH, 0);
2068 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2069 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2070 CSR_WRITE(sc, WMREG_RDH, 0);
2071 CSR_WRITE(sc, WMREG_RDT, 0);
2072 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2073 }
2074 for (i = 0; i < WM_NRXDESC; i++) {
2075 rxs = &sc->sc_rxsoft[i];
2076 if (rxs->rxs_mbuf == NULL) {
2077 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2078 printf("%s: unable to allocate or map rx "
2079 "buffer %d, error = %d\n",
2080 sc->sc_dev.dv_xname, i, error);
2081 /*
2082 * XXX Should attempt to run with fewer receive
2083 * XXX buffers instead of just failing.
2084 */
2085 wm_rxdrain(sc);
2086 goto out;
2087 }
2088 } else
2089 WM_INIT_RXDESC(sc, i);
2090 }
2091 sc->sc_rxptr = 0;
2092 sc->sc_rxdiscard = 0;
2093 WM_RXCHAIN_RESET(sc);
2094
2095 /*
2096 * Clear out the VLAN table -- we don't use it (yet).
2097 */
2098 CSR_WRITE(sc, WMREG_VET, 0);
2099 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2100 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2101
2102 /*
2103 * Set up flow-control parameters.
2104 *
2105 * XXX Values could probably stand some tuning.
2106 */
2107 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2108 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2109 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2110 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2111
2112 if (sc->sc_type < WM_T_82543) {
2113 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2114 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2115 } else {
2116 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2117 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2118 }
2119 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2120 }
2121
2122 #if 0 /* XXXJRT */
2123 /* Deal with VLAN enables. */
2124 if (sc->sc_ethercom.ec_nvlans != 0)
2125 sc->sc_ctrl |= CTRL_VME;
2126 else
2127 #endif /* XXXJRT */
2128 sc->sc_ctrl &= ~CTRL_VME;
2129
2130 /* Write the control registers. */
2131 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2132 #if 0
2133 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2134 #endif
2135
2136 /*
2137 * Set up checksum offload parameters.
2138 */
2139 reg = CSR_READ(sc, WMREG_RXCSUM);
2140 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2141 reg |= RXCSUM_IPOFL;
2142 else
2143 reg &= ~RXCSUM_IPOFL;
2144 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2145 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2146 else {
2147 reg &= ~RXCSUM_TUOFL;
2148 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2149 reg &= ~RXCSUM_IPOFL;
2150 }
2151 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2152
2153 /*
2154 * Set up the interrupt registers.
2155 */
2156 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2157 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2158 ICR_RXO | ICR_RXT0;
2159 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2160 sc->sc_icr |= ICR_RXCFG;
2161 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2162
2163 /* Set up the inter-packet gap. */
2164 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2165
2166 #if 0 /* XXXJRT */
2167 /* Set the VLAN ethernetype. */
2168 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2169 #endif
2170
2171 /*
2172 * Set up the transmit control register; we start out with
2173 	 * a collision distance suitable for FDX, but update it when
2174 * we resolve the media type.
2175 */
2176 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2177 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2178 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2179
2180 /* Set the media. */
2181 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2182
2183 /*
2184 * Set up the receive control register; we actually program
2185 * the register when we set the receive filter. Use multicast
2186 * address offset type 0.
2187 *
2188 * Only the i82544 has the ability to strip the incoming
2189 * CRC, so we don't enable that feature.
2190 */
2191 sc->sc_mchash_type = 0;
2192 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2193 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2194
2195 	if (MCLBYTES == 2048) {
2196 		sc->sc_rctl |= RCTL_2k;
2197 	} else {
2198 		/*
2199 		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA
2200 		 * XXX segments, dropping" -- why?
2201 		 */
2202 #if 0
2203 		if (sc->sc_type >= WM_T_82543) {
2204 			switch (MCLBYTES) {
2205 case 4096:
2206 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2207 break;
2208 case 8192:
2209 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2210 break;
2211 case 16384:
2212 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2213 break;
2214 default:
2215 panic("wm_init: MCLBYTES %d unsupported",
2216 MCLBYTES);
2217 break;
2218 }
2219 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
2220 #else
2221 panic("wm_init: MCLBYTES > 2048 not supported.");
2222 #endif
2223 }
2224
2225 /* Set the receive filter. */
2226 wm_set_filter(sc);
2227
2228 /* Start the one second link check clock. */
2229 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2230
2231 /* ...all done! */
2232 ifp->if_flags |= IFF_RUNNING;
2233 ifp->if_flags &= ~IFF_OACTIVE;
2234
2235 out:
2236 if (error)
2237 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2238 return (error);
2239 }
2240
2241 /*
2242 * wm_rxdrain:
2243 *
2244 * Drain the receive queue.
2245 */
2246 static void
2247 wm_rxdrain(struct wm_softc *sc)
2248 {
2249 struct wm_rxsoft *rxs;
2250 int i;
2251
2252 for (i = 0; i < WM_NRXDESC; i++) {
2253 rxs = &sc->sc_rxsoft[i];
2254 if (rxs->rxs_mbuf != NULL) {
2255 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2256 m_freem(rxs->rxs_mbuf);
2257 rxs->rxs_mbuf = NULL;
2258 }
2259 }
2260 }
2261
2262 /*
2263 * wm_stop: [ifnet interface function]
2264 *
2265 * Stop transmission on the interface.
2266 */
2267 static void
2268 wm_stop(struct ifnet *ifp, int disable)
2269 {
2270 struct wm_softc *sc = ifp->if_softc;
2271 struct wm_txsoft *txs;
2272 int i;
2273
2274 /* Stop the one second clock. */
2275 callout_stop(&sc->sc_tick_ch);
2276
2277 if (sc->sc_flags & WM_F_HAS_MII) {
2278 /* Down the MII. */
2279 mii_down(&sc->sc_mii);
2280 }
2281
2282 /* Stop the transmit and receive processes. */
2283 CSR_WRITE(sc, WMREG_TCTL, 0);
2284 CSR_WRITE(sc, WMREG_RCTL, 0);
2285
2286 /* Release any queued transmit buffers. */
2287 for (i = 0; i < WM_TXQUEUELEN; i++) {
2288 txs = &sc->sc_txsoft[i];
2289 if (txs->txs_mbuf != NULL) {
2290 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2291 m_freem(txs->txs_mbuf);
2292 txs->txs_mbuf = NULL;
2293 }
2294 }
2295
2296 if (disable)
2297 wm_rxdrain(sc);
2298
2299 /* Mark the interface as down and cancel the watchdog timer. */
2300 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2301 ifp->if_timer = 0;
2302 }
2303
2304 /*
2305 * wm_acquire_eeprom:
2306 *
2307 * Perform the EEPROM handshake required on some chips.
2308 */
2309 static int
2310 wm_acquire_eeprom(struct wm_softc *sc)
2311 {
2312 uint32_t reg;
2313 int x;
2314
2315 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2316 reg = CSR_READ(sc, WMREG_EECD);
2317
2318 /* Request EEPROM access. */
2319 reg |= EECD_EE_REQ;
2320 CSR_WRITE(sc, WMREG_EECD, reg);
2321
2322 		/* ...and wait for it to be granted. */
2323 for (x = 0; x < 100; x++) {
2324 reg = CSR_READ(sc, WMREG_EECD);
2325 if (reg & EECD_EE_GNT)
2326 break;
2327 delay(5);
2328 }
2329 if ((reg & EECD_EE_GNT) == 0) {
2330 aprint_error("%s: could not acquire EEPROM GNT\n",
2331 sc->sc_dev.dv_xname);
2332 reg &= ~EECD_EE_REQ;
2333 CSR_WRITE(sc, WMREG_EECD, reg);
2334 return (1);
2335 }
2336 }
2337
2338 return (0);
2339 }
2340
2341 /*
2342 * wm_release_eeprom:
2343 *
2344 * Release the EEPROM mutex.
2345 */
2346 static void
2347 wm_release_eeprom(struct wm_softc *sc)
2348 {
2349 uint32_t reg;
2350
2351 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2352 reg = CSR_READ(sc, WMREG_EECD);
2353 reg &= ~EECD_EE_REQ;
2354 CSR_WRITE(sc, WMREG_EECD, reg);
2355 }
2356 }
2357
2358 /*
2359 * wm_eeprom_sendbits:
2360 *
2361 * Send a series of bits to the EEPROM.
2362 */
2363 static void
2364 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2365 {
2366 uint32_t reg;
2367 int x;
2368
2369 reg = CSR_READ(sc, WMREG_EECD);
2370
2371 for (x = nbits; x > 0; x--) {
2372 if (bits & (1U << (x - 1)))
2373 reg |= EECD_DI;
2374 else
2375 reg &= ~EECD_DI;
2376 CSR_WRITE(sc, WMREG_EECD, reg);
2377 delay(2);
2378 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2379 delay(2);
2380 CSR_WRITE(sc, WMREG_EECD, reg);
2381 delay(2);
2382 }
2383 }
2384
2385 /*
2386 * wm_eeprom_recvbits:
2387 *
2388 * Receive a series of bits from the EEPROM.
2389 */
2390 static void
2391 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2392 {
2393 uint32_t reg, val;
2394 int x;
2395
2396 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2397
2398 val = 0;
2399 for (x = nbits; x > 0; x--) {
2400 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2401 delay(2);
2402 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2403 val |= (1U << (x - 1));
2404 CSR_WRITE(sc, WMREG_EECD, reg);
2405 delay(2);
2406 }
2407 *valp = val;
2408 }
2409
2410 /*
2411 * wm_read_eeprom_uwire:
2412 *
2413 * Read a word from the EEPROM using the MicroWire protocol.
2414 */
2415 static int
2416 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2417 {
2418 uint32_t reg, val;
2419 int i;
2420
2421 for (i = 0; i < wordcnt; i++) {
2422 /* Clear SK and DI. */
2423 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2424 CSR_WRITE(sc, WMREG_EECD, reg);
2425
2426 /* Set CHIP SELECT. */
2427 reg |= EECD_CS;
2428 CSR_WRITE(sc, WMREG_EECD, reg);
2429 delay(2);
2430
2431 /* Shift in the READ command. */
2432 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2433
2434 /* Shift in address. */
2435 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2436
2437 /* Shift out the data. */
2438 wm_eeprom_recvbits(sc, &val, 16);
2439 data[i] = val & 0xffff;
2440
2441 /* Clear CHIP SELECT. */
2442 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2443 CSR_WRITE(sc, WMREG_EECD, reg);
2444 delay(2);
2445 }
2446
2447 return (0);
2448 }
2449
2450 /*
2451 * wm_read_eeprom:
2452 *
2453 * Read data from the serial EEPROM.
2454 */
2455 static int
2456 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2457 {
2458 int rv;
2459
2460 if (wm_acquire_eeprom(sc))
2461 return (1);
2462
2463 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2464
2465 wm_release_eeprom(sc);
2466 return (rv);
2467 }
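
/*
 * Example usage (a sketch only; word offset 0 and the byte order
 * here reflect the i8254x EEPROM map, in which the station address
 * occupies the first three little-endian 16-bit words):
 *
 *	uint16_t ee[3];
 *	uint8_t enaddr[ETHER_ADDR_LEN];
 *
 *	if (wm_read_eeprom(sc, 0, 3, ee) == 0) {
 *		for (i = 0; i < 3; i++) {
 *			enaddr[i * 2 + 0] = ee[i] & 0xff;
 *			enaddr[i * 2 + 1] = ee[i] >> 8;
 *		}
 *	}
 */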
2468
2469 /*
2470 * wm_add_rxbuf:
2471 *
2472  *	Add a receive buffer to the indicated descriptor.
2473 */
2474 static int
2475 wm_add_rxbuf(struct wm_softc *sc, int idx)
2476 {
2477 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2478 struct mbuf *m;
2479 int error;
2480
2481 MGETHDR(m, M_DONTWAIT, MT_DATA);
2482 if (m == NULL)
2483 return (ENOBUFS);
2484
2485 MCLGET(m, M_DONTWAIT);
2486 if ((m->m_flags & M_EXT) == 0) {
2487 m_freem(m);
2488 return (ENOBUFS);
2489 }
2490
2491 if (rxs->rxs_mbuf != NULL)
2492 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2493
2494 rxs->rxs_mbuf = m;
2495
2496 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2497 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2498 BUS_DMA_READ|BUS_DMA_NOWAIT);
2499 if (error) {
2500 printf("%s: unable to load rx DMA map %d, error = %d\n",
2501 sc->sc_dev.dv_xname, idx, error);
2502 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2503 }
2504
2505 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2506 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2507
2508 WM_INIT_RXDESC(sc, idx);
2509
2510 return (0);
2511 }
2512
2513 /*
2514 * wm_set_ral:
2515 *
2516  *	Set an entry in the receive address list.
2517 */
2518 static void
2519 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2520 {
2521 uint32_t ral_lo, ral_hi;
2522
2523 if (enaddr != NULL) {
2524 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2525 (enaddr[3] << 24);
2526 ral_hi = enaddr[4] | (enaddr[5] << 8);
2527 ral_hi |= RAL_AV;
2528 } else {
2529 ral_lo = 0;
2530 ral_hi = 0;
2531 }
2532
2533 if (sc->sc_type >= WM_T_82544) {
2534 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2535 ral_lo);
2536 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2537 ral_hi);
2538 } else {
2539 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2540 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2541 }
2542 }
2543
2544 /*
2545 * wm_mchash:
2546 *
2547 * Compute the hash of the multicast address for the 4096-bit
2548 * multicast filter.
2549 */
2550 static uint32_t
2551 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2552 {
2553 static const int lo_shift[4] = { 4, 3, 2, 0 };
2554 static const int hi_shift[4] = { 4, 5, 6, 8 };
2555 uint32_t hash;
2556
2557 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2558 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2559
2560 return (hash & 0xfff);
2561 }
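
/*
 * Worked example for the default filter type 0 (lo_shift 4, hi_shift 4):
 * the IPv4 all-hosts group 224.0.0.1 maps to the multicast address
 * 01:00:5e:00:00:01, so enaddr[4] = 0x00 and enaddr[5] = 0x01, giving
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * wm_set_filter() below then selects MTA word (0x010 >> 5) & 0x7f = 0
 * and bit 0x010 & 0x1f = 16.
 */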
2562
2563 /*
2564 * wm_set_filter:
2565 *
2566 * Set up the receive filter.
2567 */
2568 static void
2569 wm_set_filter(struct wm_softc *sc)
2570 {
2571 struct ethercom *ec = &sc->sc_ethercom;
2572 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2573 struct ether_multi *enm;
2574 struct ether_multistep step;
2575 bus_addr_t mta_reg;
2576 uint32_t hash, reg, bit;
2577 int i;
2578
2579 if (sc->sc_type >= WM_T_82544)
2580 mta_reg = WMREG_CORDOVA_MTA;
2581 else
2582 mta_reg = WMREG_MTA;
2583
2584 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2585
2586 if (ifp->if_flags & IFF_BROADCAST)
2587 sc->sc_rctl |= RCTL_BAM;
2588 if (ifp->if_flags & IFF_PROMISC) {
2589 sc->sc_rctl |= RCTL_UPE;
2590 goto allmulti;
2591 }
2592
2593 /*
2594 * Set the station address in the first RAL slot, and
2595 * clear the remaining slots.
2596 */
2597 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2598 for (i = 1; i < WM_RAL_TABSIZE; i++)
2599 wm_set_ral(sc, NULL, i);
2600
2601 /* Clear out the multicast table. */
2602 for (i = 0; i < WM_MC_TABSIZE; i++)
2603 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2604
2605 ETHER_FIRST_MULTI(step, ec, enm);
2606 while (enm != NULL) {
2607 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2608 /*
2609 * We must listen to a range of multicast addresses.
2610 * For now, just accept all multicasts, rather than
2611 * trying to set only those filter bits needed to match
2612 * the range. (At this time, the only use of address
2613 * ranges is for IP multicast routing, for which the
2614 * range is big enough to require all bits set.)
2615 */
2616 goto allmulti;
2617 }
2618
2619 hash = wm_mchash(sc, enm->enm_addrlo);
2620
2621 reg = (hash >> 5) & 0x7f;
2622 bit = hash & 0x1f;
2623
2624 hash = CSR_READ(sc, mta_reg + (reg << 2));
2625 hash |= 1U << bit;
2626
2627 /* XXX Hardware bug?? */
2628 		if (sc->sc_type == WM_T_82544 && (reg & 1) == 1) {
2629 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2630 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2631 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2632 } else
2633 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2634
2635 ETHER_NEXT_MULTI(step, enm);
2636 }
2637
2638 ifp->if_flags &= ~IFF_ALLMULTI;
2639 goto setit;
2640
2641 allmulti:
2642 ifp->if_flags |= IFF_ALLMULTI;
2643 sc->sc_rctl |= RCTL_MPE;
2644
2645 setit:
2646 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2647 }
2648
2649 /*
2650 * wm_tbi_mediainit:
2651 *
2652 * Initialize media for use on 1000BASE-X devices.
2653 */
2654 static void
2655 wm_tbi_mediainit(struct wm_softc *sc)
2656 {
2657 const char *sep = "";
2658
2659 if (sc->sc_type < WM_T_82543)
2660 sc->sc_tipg = TIPG_WM_DFLT;
2661 else
2662 sc->sc_tipg = TIPG_LG_DFLT;
2663
2664 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2665 wm_tbi_mediastatus);
2666
2667 /*
2668 * SWD Pins:
2669 *
2670 * 0 = Link LED (output)
2671 * 1 = Loss Of Signal (input)
2672 */
2673 sc->sc_ctrl |= CTRL_SWDPIO(0);
2674 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2675
2676 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2677
2678 #define ADD(ss, mm, dd) \
2679 do { \
2680 printf("%s%s", sep, ss); \
2681 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2682 sep = ", "; \
2683 } while (/*CONSTCOND*/0)
2684
2685 printf("%s: ", sc->sc_dev.dv_xname);
2686 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2687 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2688 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2689 printf("\n");
2690
2691 #undef ADD
2692
2693 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2694 }
2695
2696 /*
2697 * wm_tbi_mediastatus: [ifmedia interface function]
2698 *
2699 * Get the current interface media status on a 1000BASE-X device.
2700 */
2701 static void
2702 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2703 {
2704 struct wm_softc *sc = ifp->if_softc;
2705
2706 ifmr->ifm_status = IFM_AVALID;
2707 ifmr->ifm_active = IFM_ETHER;
2708
2709 if (sc->sc_tbi_linkup == 0) {
2710 ifmr->ifm_active |= IFM_NONE;
2711 return;
2712 }
2713
2714 ifmr->ifm_status |= IFM_ACTIVE;
2715 ifmr->ifm_active |= IFM_1000_SX;
2716 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2717 ifmr->ifm_active |= IFM_FDX;
2718 }
2719
2720 /*
2721 * wm_tbi_mediachange: [ifmedia interface function]
2722 *
2723 * Set hardware to newly-selected media on a 1000BASE-X device.
2724 */
2725 static int
2726 wm_tbi_mediachange(struct ifnet *ifp)
2727 {
2728 struct wm_softc *sc = ifp->if_softc;
2729 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2730 uint32_t status;
2731 int i;
2732
2733 sc->sc_txcw = ife->ifm_data;
2734 if (sc->sc_ctrl & CTRL_RFCE)
2735 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2736 if (sc->sc_ctrl & CTRL_TFCE)
2737 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2738 sc->sc_txcw |= TXCW_ANE;
2739
2740 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2741 delay(10000);
2742
2743 sc->sc_tbi_anstate = 0;
2744
2745 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2746 /* Have signal; wait for the link to come up. */
2747 for (i = 0; i < 50; i++) {
2748 delay(10000);
2749 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2750 break;
2751 }
2752
2753 status = CSR_READ(sc, WMREG_STATUS);
2754 if (status & STATUS_LU) {
2755 /* Link is up. */
2756 DPRINTF(WM_DEBUG_LINK,
2757 ("%s: LINK: set media -> link up %s\n",
2758 sc->sc_dev.dv_xname,
2759 (status & STATUS_FD) ? "FDX" : "HDX"));
2760 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2761 if (status & STATUS_FD)
2762 sc->sc_tctl |=
2763 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2764 else
2765 sc->sc_tctl |=
2766 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2767 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2768 sc->sc_tbi_linkup = 1;
2769 } else {
2770 /* Link is down. */
2771 DPRINTF(WM_DEBUG_LINK,
2772 ("%s: LINK: set media -> link down\n",
2773 sc->sc_dev.dv_xname));
2774 sc->sc_tbi_linkup = 0;
2775 }
2776 } else {
2777 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2778 sc->sc_dev.dv_xname));
2779 sc->sc_tbi_linkup = 0;
2780 }
2781
2782 wm_tbi_set_linkled(sc);
2783
2784 return (0);
2785 }
2786
2787 /*
2788 * wm_tbi_set_linkled:
2789 *
2790 * Update the link LED on 1000BASE-X devices.
2791 */
2792 static void
2793 wm_tbi_set_linkled(struct wm_softc *sc)
2794 {
2795
2796 if (sc->sc_tbi_linkup)
2797 sc->sc_ctrl |= CTRL_SWDPIN(0);
2798 else
2799 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2800
2801 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2802 }
2803
2804 /*
2805 * wm_tbi_check_link:
2806 *
2807 * Check the link on 1000BASE-X devices.
2808 */
2809 static void
2810 wm_tbi_check_link(struct wm_softc *sc)
2811 {
2812 uint32_t rxcw, ctrl, status;
2813
2814 if (sc->sc_tbi_anstate == 0)
2815 return;
2816 else if (sc->sc_tbi_anstate > 1) {
2817 DPRINTF(WM_DEBUG_LINK,
2818 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2819 sc->sc_tbi_anstate));
2820 sc->sc_tbi_anstate--;
2821 return;
2822 }
2823
2824 sc->sc_tbi_anstate = 0;
2825
2826 rxcw = CSR_READ(sc, WMREG_RXCW);
2827 ctrl = CSR_READ(sc, WMREG_CTRL);
2828 status = CSR_READ(sc, WMREG_STATUS);
2829
2830 if ((status & STATUS_LU) == 0) {
2831 DPRINTF(WM_DEBUG_LINK,
2832 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2833 sc->sc_tbi_linkup = 0;
2834 } else {
2835 DPRINTF(WM_DEBUG_LINK,
2836 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2837 (status & STATUS_FD) ? "FDX" : "HDX"));
2838 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2839 if (status & STATUS_FD)
2840 sc->sc_tctl |=
2841 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2842 else
2843 sc->sc_tctl |=
2844 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2845 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2846 sc->sc_tbi_linkup = 1;
2847 }
2848
2849 wm_tbi_set_linkled(sc);
2850 }
2851
2852 /*
2853 * wm_gmii_reset:
2854 *
2855 * Reset the PHY.
2856 */
2857 static void
2858 wm_gmii_reset(struct wm_softc *sc)
2859 {
2860 uint32_t reg;
2861
2862 if (sc->sc_type >= WM_T_82544) {
2863 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2864 delay(20000);
2865
2866 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2867 delay(20000);
2868 } else {
2869 /* The PHY reset pin is active-low. */
2870 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2871 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2872 CTRL_EXT_SWDPIN(4));
2873 reg |= CTRL_EXT_SWDPIO(4);
2874
2875 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2876 delay(10);
2877
2878 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2879 delay(10);
2880
2881 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2882 delay(10);
2883 #if 0
2884 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2885 #endif
2886 }
2887 }
2888
2889 /*
2890 * wm_gmii_mediainit:
2891 *
2892 * Initialize media for use on 1000BASE-T devices.
2893 */
2894 static void
2895 wm_gmii_mediainit(struct wm_softc *sc)
2896 {
2897 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2898
2899 /* We have MII. */
2900 sc->sc_flags |= WM_F_HAS_MII;
2901
2902 sc->sc_tipg = TIPG_1000T_DFLT;
2903
2904 /*
2905 * Let the chip set speed/duplex on its own based on
2906 * signals from the PHY.
2907 */
2908 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2909 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2910
2911 /* Initialize our media structures and probe the GMII. */
2912 sc->sc_mii.mii_ifp = ifp;
2913
2914 if (sc->sc_type >= WM_T_82544) {
2915 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2916 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2917 } else {
2918 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2919 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2920 }
2921 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2922
2923 wm_gmii_reset(sc);
2924
2925 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2926 wm_gmii_mediastatus);
2927
2928 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2929 MII_OFFSET_ANY, 0);
2930 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2931 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2932 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2933 } else
2934 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2935 }
2936
2937 /*
2938 * wm_gmii_mediastatus: [ifmedia interface function]
2939 *
2940 * Get the current interface media status on a 1000BASE-T device.
2941 */
2942 static void
2943 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2944 {
2945 struct wm_softc *sc = ifp->if_softc;
2946
2947 mii_pollstat(&sc->sc_mii);
2948 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2949 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2950 }
2951
2952 /*
2953 * wm_gmii_mediachange: [ifmedia interface function]
2954 *
2955 * Set hardware to newly-selected media on a 1000BASE-T device.
2956 */
2957 static int
2958 wm_gmii_mediachange(struct ifnet *ifp)
2959 {
2960 struct wm_softc *sc = ifp->if_softc;
2961
2962 if (ifp->if_flags & IFF_UP)
2963 mii_mediachg(&sc->sc_mii);
2964 return (0);
2965 }
2966
2967 #define MDI_IO CTRL_SWDPIN(2)
2968 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2969 #define MDI_CLK CTRL_SWDPIN(3)
2970
2971 static void
2972 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2973 {
2974 uint32_t i, v;
2975
2976 v = CSR_READ(sc, WMREG_CTRL);
2977 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2978 v |= MDI_DIR | CTRL_SWDPIO(3);
2979
2980 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2981 if (data & i)
2982 v |= MDI_IO;
2983 else
2984 v &= ~MDI_IO;
2985 CSR_WRITE(sc, WMREG_CTRL, v);
2986 delay(10);
2987 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2988 delay(10);
2989 CSR_WRITE(sc, WMREG_CTRL, v);
2990 delay(10);
2991 }
2992 }
2993
2994 static uint32_t
2995 i82543_mii_recvbits(struct wm_softc *sc)
2996 {
2997 uint32_t v, i, data = 0;
2998
2999 v = CSR_READ(sc, WMREG_CTRL);
3000 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3001 v |= CTRL_SWDPIO(3);
3002
3003 CSR_WRITE(sc, WMREG_CTRL, v);
3004 delay(10);
3005 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3006 delay(10);
3007 CSR_WRITE(sc, WMREG_CTRL, v);
3008 delay(10);
3009
3010 for (i = 0; i < 16; i++) {
3011 data <<= 1;
3012 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3013 delay(10);
3014 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3015 data |= 1;
3016 CSR_WRITE(sc, WMREG_CTRL, v);
3017 delay(10);
3018 }
3019
3020 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3021 delay(10);
3022 CSR_WRITE(sc, WMREG_CTRL, v);
3023 delay(10);
3024
3025 return (data);
3026 }
3027
3028 #undef MDI_IO
3029 #undef MDI_DIR
3030 #undef MDI_CLK
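
/*
 * For reference, the bit-bang routines above implement IEEE 802.3
 * clause 22 MDIO framing.  A read, as issued by
 * wm_gmii_i82543_readreg() below, is a 32-bit all-ones preamble
 * followed by 14 command bits, most significant bit first:
 *
 *	<01>	start delimiter		(MII_COMMAND_START)
 *	<10>	read opcode		(MII_COMMAND_READ)
 *	<ppppp>	5-bit PHY address
 *	<rrrrr>	5-bit register address
 *
 * after which i82543_mii_recvbits() clocks a turnaround and the
 * 16 data bits back in.
 */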
3031
3032 /*
3033 * wm_gmii_i82543_readreg: [mii interface function]
3034 *
3035 * Read a PHY register on the GMII (i82543 version).
3036 */
3037 static int
3038 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3039 {
3040 struct wm_softc *sc = (void *) self;
3041 int rv;
3042
3043 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3044 i82543_mii_sendbits(sc, reg | (phy << 5) |
3045 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3046 rv = i82543_mii_recvbits(sc) & 0xffff;
3047
3048 DPRINTF(WM_DEBUG_GMII,
3049 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3050 sc->sc_dev.dv_xname, phy, reg, rv));
3051
3052 return (rv);
3053 }
3054
3055 /*
3056 * wm_gmii_i82543_writereg: [mii interface function]
3057 *
3058 * Write a PHY register on the GMII (i82543 version).
3059 */
3060 static void
3061 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3062 {
3063 struct wm_softc *sc = (void *) self;
3064
3065 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3066 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3067 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3068 (MII_COMMAND_START << 30), 32);
3069 }
3070
3071 /*
3072 * wm_gmii_i82544_readreg: [mii interface function]
3073 *
3074 * Read a PHY register on the GMII.
3075 */
3076 static int
3077 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3078 {
3079 struct wm_softc *sc = (void *) self;
3080 uint32_t mdic;
3081 int i, rv;
3082
3083 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3084 MDIC_REGADD(reg));
3085
3086 for (i = 0; i < 100; i++) {
3087 mdic = CSR_READ(sc, WMREG_MDIC);
3088 if (mdic & MDIC_READY)
3089 break;
3090 delay(10);
3091 }
3092
3093 if ((mdic & MDIC_READY) == 0) {
3094 printf("%s: MDIC read timed out: phy %d reg %d\n",
3095 sc->sc_dev.dv_xname, phy, reg);
3096 rv = 0;
3097 } else if (mdic & MDIC_E) {
3098 #if 0 /* This is normal if no PHY is present. */
3099 printf("%s: MDIC read error: phy %d reg %d\n",
3100 sc->sc_dev.dv_xname, phy, reg);
3101 #endif
3102 rv = 0;
3103 } else {
3104 		rv = MDIC_DATA(mdic);
		/* An all-ones value usually means no PHY is responding. */
3105 		if (rv == 0xffff)
3106 			rv = 0;
3107 }
3108
3109 return (rv);
3110 }
3111
3112 /*
3113 * wm_gmii_i82544_writereg: [mii interface function]
3114 *
3115 * Write a PHY register on the GMII.
3116 */
3117 static void
3118 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3119 {
3120 struct wm_softc *sc = (void *) self;
3121 uint32_t mdic;
3122 int i;
3123
3124 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3125 MDIC_REGADD(reg) | MDIC_DATA(val));
3126
3127 for (i = 0; i < 100; i++) {
3128 mdic = CSR_READ(sc, WMREG_MDIC);
3129 if (mdic & MDIC_READY)
3130 break;
3131 delay(10);
3132 }
3133
3134 if ((mdic & MDIC_READY) == 0)
3135 printf("%s: MDIC write timed out: phy %d reg %d\n",
3136 sc->sc_dev.dv_xname, phy, reg);
3137 else if (mdic & MDIC_E)
3138 printf("%s: MDIC write error: phy %d reg %d\n",
3139 sc->sc_dev.dv_xname, phy, reg);
3140 }
3141
3142 /*
3143 * wm_gmii_statchg: [mii interface function]
3144 *
3145 * Callback from MII layer when media changes.
3146 */
3147 static void
3148 wm_gmii_statchg(struct device *self)
3149 {
3150 struct wm_softc *sc = (void *) self;
3151
3152 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3153
3154 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3155 DPRINTF(WM_DEBUG_LINK,
3156 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3157 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3158 } else {
3159 DPRINTF(WM_DEBUG_LINK,
3160 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3161 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3162 }
3163
3164 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3165 }
3166