/*	$NetBSD: if_wm.c,v 1.55 2003/10/21 16:51:17 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.55 2003/10/21 16:51:17 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
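
/*
 * Note the double parentheses at DPRINTF call sites: the entire
 * printf() argument list is passed as a single macro argument, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: TDT -> %d\n", xname, idx));
 *
 * so the macro can expand to "printf (...)" without needing variadic
 * macro support.  (xname/idx here are placeholder arguments.)
 */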

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
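
/*
 * Since the ring sizes are powers of two, masking with (size - 1)
 * implements the wrap-around cheaply: e.g. with WM_NTXDESC == 256,
 * WM_NEXTTX(255) == (256 & 0xff) == 0, returning to the start of
 * the ring without a division or branch.
 */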

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
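
/*
 * The "5 Rx buffers" figure follows from the buffer size: a maximal
 * jumbo frame (ETHER_MAX_LEN_JUMBO, 9018 bytes including header and
 * FCS) split across 2k buffers occupies 5 descriptors, so the
 * 256-entry ring holds roughly 256 / 5 = 51 such frames; "50" above
 * is that figure, rounded down.
 */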

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown = 0,
	WM_T_82542_2_0,		/* i82542 2.0 (really old) */
	WM_T_82542_2_1,		/* i82542 2.1+ (old) */
	WM_T_82543,		/* i82543 */
	WM_T_82544,		/* i82544 */
	WM_T_82540,		/* i82540 */
	WM_T_82545,		/* i82545 */
	WM_T_82545_3,		/* i82545 3.0+ */
	WM_T_82546,		/* i82546 */
	WM_T_82546_3,		/* i82546 3.0+ */
	WM_T_82541,		/* i82541 */
	WM_T_82541_2,		/* i82541 2.0+ */
	WM_T_82547,		/* i82547 */
	WM_T_82547_2,		/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)	\
do {	\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;	\
	*(sc)->sc_rxtailp = NULL;	\
	(sc)->sc_rxlen = 0;	\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)	\
do {	\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);	\
	(sc)->sc_rxtailp = &(m)->m_next;	\
} while (/*CONSTCOND*/0)
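
/*
 * sc_rxtailp always points at the m_next field of the last mbuf in
 * the chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends in constant time: store the new mbuf
 * through the tail pointer, then advance the tail pointer to the
 * new mbuf's m_next.
 */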

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)	\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)	\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)	\
do {	\
	int __x, __n;	\
	\
	__x = (x);	\
	__n = (n);	\
	\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {	\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *	\
		    (WM_NTXDESC - __x), (ops));	\
		__n -= (WM_NTXDESC - __x);	\
		__x = 0;	\
	}	\
	\
	/* Now sync whatever is left. */	\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
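
/*
 * Example: WM_CDTXSYNC(sc, 254, 4, ops) with WM_NTXDESC == 256 syncs
 * descriptors 254-255 in the first bus_dmamap_sync() call, then
 * wraps and syncs descriptors 0-1 in the second, covering a span
 * that straddles the end of the ring.
 */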

#define	WM_CDRXSYNC(sc, x, ops)	\
do {	\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)	\
do {	\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];	\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];	\
	struct mbuf *__m = __rxs->rxs_mbuf;	\
	\
	/*	\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.	\
	 *	\
	 * XXX BRAINDAMAGE ALERT!	\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.	\
	 */	\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
	\
	__rxd->wrx_addr.wa_low =	\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +	\
		(sc)->sc_align_tweak);	\
	__rxd->wrx_addr.wa_high = 0;	\
	__rxd->wrx_len = 0;	\
	__rxd->wrx_cksum = 0;	\
	__rxd->wrx_status = 0;	\
	__rxd->wrx_errors = 0;	\
	__rxd->wrx_special = 0;	\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);	\
	\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));	\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

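/*
 * Indirect register access through I/O space (wm_io_read() above and
 * wm_io_write() below) uses a two-register window: the target
 * register's offset is written to the IOADDR register at offset 0 of
 * the I/O BAR, and the data is then transferred through the IODATA
 * register at offset 4.
 */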
static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			     0, &sc->sc_iot, &sc->sc_ioh,
			     NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (STATUS_PCIXSPD(reg)) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	eetype = "MicroWire";
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	}
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
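
	/* Each 16-bit EEPROM word holds two address octets, low byte first. */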
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
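
	/*
	 * In the context descriptor, CSS is the offset at which
	 * checksumming starts, CSO the offset at which the computed
	 * checksum is stored, and CSE the last byte to include (0
	 * meaning "through the end of the packet"), all measured from
	 * the start of the frame; thus IPCSO below points at the IP
	 * header's ip_sum field and TUCSO at the TCP/UDP checksum field.
	 */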

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
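		/*
		 * WTX_CMD_EOP marks the end of the packet, WTX_CMD_IFCS
		 * has the chip append the FCS, and WTX_CMD_RS requests a
		 * status write-back (the DD bit that wm_txintr() checks)
		 * once the descriptor has been processed.
		 */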
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2011 }
2012
2013 /*
2014 * wm_linkintr:
2015 *
2016 * Helper; handle link interrupts.
2017 */
2018 static void
2019 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2020 {
2021 uint32_t status;
2022
2023 /*
2024 * If we get a link status interrupt on a 1000BASE-T
2025 * device, just fall into the normal MII tick path.
2026 */
2027 if (sc->sc_flags & WM_F_HAS_MII) {
2028 if (icr & ICR_LSC) {
2029 DPRINTF(WM_DEBUG_LINK,
2030 ("%s: LINK: LSC -> mii_tick\n",
2031 sc->sc_dev.dv_xname));
2032 mii_tick(&sc->sc_mii);
2033 } else if (icr & ICR_RXSEQ) {
2034 DPRINTF(WM_DEBUG_LINK,
2035 ("%s: LINK Receive sequence error\n",
2036 sc->sc_dev.dv_xname));
2037 }
2038 return;
2039 }
2040
2041 /*
2042 * If we are now receiving /C/, check for link again in
2043 * a couple of link clock ticks.
2044 */
2045 if (icr & ICR_RXCFG) {
2046 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2047 sc->sc_dev.dv_xname));
2048 sc->sc_tbi_anstate = 2;
2049 }
2050
2051 if (icr & ICR_LSC) {
2052 status = CSR_READ(sc, WMREG_STATUS);
2053 if (status & STATUS_LU) {
2054 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2055 sc->sc_dev.dv_xname,
2056 (status & STATUS_FD) ? "FDX" : "HDX"));
2057 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2058 if (status & STATUS_FD)
2059 sc->sc_tctl |=
2060 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2061 else
2062 sc->sc_tctl |=
2063 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2064 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2065 sc->sc_tbi_linkup = 1;
2066 } else {
2067 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2068 sc->sc_dev.dv_xname));
2069 sc->sc_tbi_linkup = 0;
2070 }
2071 sc->sc_tbi_anstate = 2;
2072 wm_tbi_set_linkled(sc);
2073 } else if (icr & ICR_RXSEQ) {
2074 DPRINTF(WM_DEBUG_LINK,
2075 ("%s: LINK: Receive sequence error\n",
2076 sc->sc_dev.dv_xname));
2077 }
2078 }
2079
2080 /*
2081 * wm_tick:
2082 *
2083  *	One-second timer; used to check link status, via either the
2084  *	MII tick or the TBI link check.
2085 */
2086 static void
2087 wm_tick(void *arg)
2088 {
2089 struct wm_softc *sc = arg;
2090 int s;
2091
2092 s = splnet();
2093
2094 if (sc->sc_flags & WM_F_HAS_MII)
2095 mii_tick(&sc->sc_mii);
2096 else
2097 wm_tbi_check_link(sc);
2098
2099 splx(s);
2100
2101 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2102 }
2103
2104 /*
2105 * wm_reset:
2106 *
2107  *	Reset the chip to a known state.
2108 */
2109 static void
2110 wm_reset(struct wm_softc *sc)
2111 {
2112 int i;
2113
2114 switch (sc->sc_type) {
2115 case WM_T_82544:
2116 case WM_T_82540:
2117 case WM_T_82545:
2118 case WM_T_82546:
2119 case WM_T_82541:
2120 case WM_T_82541_2:
2121 /*
2122 * These chips have a problem with the memory-mapped
2123 * write cycle when issuing the reset, so use I/O-mapped
2124 * access, if possible.
2125 */
2126 if (sc->sc_flags & WM_F_IOH_VALID)
2127 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2128 else
2129 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2130 break;
2131
2132 case WM_T_82545_3:
2133 case WM_T_82546_3:
2134 /* Use the shadow control register on these chips. */
2135 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2136 break;
2137
2138 default:
2139 /* Everything else can safely use the documented method. */
2140 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2141 break;
2142 }
2143 delay(10000);
2144
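	/*
	 * CTRL_RST self-clears when the reset completes; poll for up
	 * to 20ms (1000 * 20us) before complaining.
	 */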
2145 for (i = 0; i < 1000; i++) {
2146 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2147 return;
2148 delay(20);
2149 }
2150
2151 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2152 printf("%s: WARNING: reset failed to complete\n",
2153 sc->sc_dev.dv_xname);
2154 }
2155
2156 /*
2157 * wm_init: [ifnet interface function]
2158 *
2159 * Initialize the interface. Must be called at splnet().
2160 */
2161 static int
2162 wm_init(struct ifnet *ifp)
2163 {
2164 struct wm_softc *sc = ifp->if_softc;
2165 struct wm_rxsoft *rxs;
2166 int i, error = 0;
2167 uint32_t reg;
2168
2169 /*
2170 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2171 	 * There is a small but measurable benefit to avoiding the adjustment
2172 	 * of the descriptor so that the headers are aligned, for normal MTU,
2173 * on such platforms. One possibility is that the DMA itself is
2174 * slightly more efficient if the front of the entire packet (instead
2175 * of the front of the headers) is aligned.
2176 *
2177 * Note we must always set align_tweak to 0 if we are using
2178 * jumbo frames.
2179 */
2180 #ifdef __NO_STRICT_ALIGNMENT
2181 sc->sc_align_tweak = 0;
2182 #else
2183 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2184 sc->sc_align_tweak = 0;
2185 else
2186 sc->sc_align_tweak = 2;
2187 #endif /* __NO_STRICT_ALIGNMENT */
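	/*
	 * With a 2-byte tweak, the 14-byte Ethernet header ends on a
	 * 4-byte boundary, so the IP header that follows it is aligned.
	 */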
2188
2189 /* Cancel any pending I/O. */
2190 wm_stop(ifp, 0);
2191
2192 /* Reset the chip to a known state. */
2193 wm_reset(sc);
2194
2195 /* Initialize the transmit descriptor ring. */
2196 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2197 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2198 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2199 sc->sc_txfree = WM_NTXDESC;
2200 sc->sc_txnext = 0;
2201
2202 sc->sc_txctx_ipcs = 0xffffffff;
2203 sc->sc_txctx_tucs = 0xffffffff;
2204
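	/*
	 * Pre-i82543 (i82542) chips use the original descriptor ring
	 * register layout; i82543 and later moved these registers.
	 */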
2205 if (sc->sc_type < WM_T_82543) {
2206 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2207 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2208 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2209 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2210 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2211 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2212 } else {
2213 CSR_WRITE(sc, WMREG_TBDAH, 0);
2214 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2215 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2216 CSR_WRITE(sc, WMREG_TDH, 0);
2217 CSR_WRITE(sc, WMREG_TDT, 0);
2218 CSR_WRITE(sc, WMREG_TIDV, 128);
2219
2220 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2221 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2222 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2223 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2224 }
2225 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2226 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2227
2228 /* Initialize the transmit job descriptors. */
2229 for (i = 0; i < WM_TXQUEUELEN; i++)
2230 sc->sc_txsoft[i].txs_mbuf = NULL;
2231 sc->sc_txsfree = WM_TXQUEUELEN;
2232 sc->sc_txsnext = 0;
2233 sc->sc_txsdirty = 0;
2234
2235 /*
2236 * Initialize the receive descriptor and receive job
2237 * descriptor rings.
2238 */
2239 if (sc->sc_type < WM_T_82543) {
2240 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2241 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2242 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2243 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2244 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2245 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2246
2247 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2248 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2249 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2250 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2251 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2252 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2253 } else {
2254 CSR_WRITE(sc, WMREG_RDBAH, 0);
2255 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2256 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2257 CSR_WRITE(sc, WMREG_RDH, 0);
2258 CSR_WRITE(sc, WMREG_RDT, 0);
2259 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2260 }
2261 for (i = 0; i < WM_NRXDESC; i++) {
2262 rxs = &sc->sc_rxsoft[i];
2263 if (rxs->rxs_mbuf == NULL) {
2264 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2265 printf("%s: unable to allocate or map rx "
2266 "buffer %d, error = %d\n",
2267 sc->sc_dev.dv_xname, i, error);
2268 /*
2269 * XXX Should attempt to run with fewer receive
2270 * XXX buffers instead of just failing.
2271 */
2272 wm_rxdrain(sc);
2273 goto out;
2274 }
2275 } else
2276 WM_INIT_RXDESC(sc, i);
2277 }
2278 sc->sc_rxptr = 0;
2279 sc->sc_rxdiscard = 0;
2280 WM_RXCHAIN_RESET(sc);
2281
2282 /*
2283 * Clear out the VLAN table -- we don't use it (yet).
2284 */
2285 CSR_WRITE(sc, WMREG_VET, 0);
2286 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2287 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2288
2289 /*
2290 * Set up flow-control parameters.
2291 *
2292 * XXX Values could probably stand some tuning.
2293 */
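	/*
	 * FCAL/FCAH/FCT program the PAUSE frame destination address and
	 * Ethertype; FCRTL/FCRTH are the receive FIFO low and high
	 * water marks, and FCTTV the transmitted pause timer value.
	 */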
2294 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2295 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2296 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2297 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2298
2299 if (sc->sc_type < WM_T_82543) {
2300 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2301 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2302 } else {
2303 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2304 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2305 }
2306 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2307 }
2308
2309 #if 0 /* XXXJRT */
2310 /* Deal with VLAN enables. */
2311 if (sc->sc_ethercom.ec_nvlans != 0)
2312 sc->sc_ctrl |= CTRL_VME;
2313 else
2314 #endif /* XXXJRT */
2315 sc->sc_ctrl &= ~CTRL_VME;
2316
2317 /* Write the control registers. */
2318 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2319 #if 0
2320 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2321 #endif
2322
2323 /*
2324 * Set up checksum offload parameters.
2325 */
2326 reg = CSR_READ(sc, WMREG_RXCSUM);
2327 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2328 reg |= RXCSUM_IPOFL;
2329 else
2330 reg &= ~RXCSUM_IPOFL;
2331 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2332 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2333 else {
2334 reg &= ~RXCSUM_TUOFL;
2335 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2336 reg &= ~RXCSUM_IPOFL;
2337 }
2338 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2339
2340 /*
2341 * Set up the interrupt registers.
2342 */
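	/* Writing IMC disables all interrupts; IMS then enables ours. */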
2343 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2344 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2345 ICR_RXO | ICR_RXT0;
2346 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2347 sc->sc_icr |= ICR_RXCFG;
2348 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2349
2350 /* Set up the inter-packet gap. */
2351 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2352
2353 #if 0 /* XXXJRT */
2354 /* Set the VLAN ethernetype. */
2355 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2356 #endif
2357
2358 /*
2359 * Set up the transmit control register; we start out with
2360 	 * a collision distance suitable for FDX, but update it when
2361 * we resolve the media type.
2362 */
2363 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2364 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2365 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2366
2367 /* Set the media. */
2368 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2369
2370 /*
2371 * Set up the receive control register; we actually program
2372 * the register when we set the receive filter. Use multicast
2373 * address offset type 0.
2374 *
2375 * Only the i82544 has the ability to strip the incoming
2376 * CRC, so we don't enable that feature.
2377 */
2378 sc->sc_mchash_type = 0;
2379 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2380 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2381
2382 	if (MCLBYTES == 2048) {
2383 sc->sc_rctl |= RCTL_2k;
2384 } else {
2385 /*
2386 		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA
2387 * XXX segments, dropping" -- why?
2388 */
2389 #if 0
2390 		if (sc->sc_type >= WM_T_82543) {
2391 			switch (MCLBYTES) {
2392 case 4096:
2393 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2394 break;
2395 case 8192:
2396 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2397 break;
2398 case 16384:
2399 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2400 break;
2401 default:
2402 panic("wm_init: MCLBYTES %d unsupported",
2403 MCLBYTES);
2404 break;
2405 }
2406 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2407 #else
2408 panic("wm_init: MCLBYTES > 2048 not supported.");
2409 #endif
2410 }
2411
2412 /* Set the receive filter. */
2413 wm_set_filter(sc);
2414
2415 /* Start the one second link check clock. */
2416 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2417
2418 /* ...all done! */
2419 ifp->if_flags |= IFF_RUNNING;
2420 ifp->if_flags &= ~IFF_OACTIVE;
2421
2422 out:
2423 if (error)
2424 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2425 return (error);
2426 }
2427
2428 /*
2429 * wm_rxdrain:
2430 *
2431 * Drain the receive queue.
2432 */
2433 static void
2434 wm_rxdrain(struct wm_softc *sc)
2435 {
2436 struct wm_rxsoft *rxs;
2437 int i;
2438
2439 for (i = 0; i < WM_NRXDESC; i++) {
2440 rxs = &sc->sc_rxsoft[i];
2441 if (rxs->rxs_mbuf != NULL) {
2442 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2443 m_freem(rxs->rxs_mbuf);
2444 rxs->rxs_mbuf = NULL;
2445 }
2446 }
2447 }
2448
2449 /*
2450 * wm_stop: [ifnet interface function]
2451 *
2452 * Stop transmission on the interface.
2453 */
2454 static void
2455 wm_stop(struct ifnet *ifp, int disable)
2456 {
2457 struct wm_softc *sc = ifp->if_softc;
2458 struct wm_txsoft *txs;
2459 int i;
2460
2461 /* Stop the one second clock. */
2462 callout_stop(&sc->sc_tick_ch);
2463
2464 if (sc->sc_flags & WM_F_HAS_MII) {
2465 /* Down the MII. */
2466 mii_down(&sc->sc_mii);
2467 }
2468
2469 /* Stop the transmit and receive processes. */
2470 CSR_WRITE(sc, WMREG_TCTL, 0);
2471 CSR_WRITE(sc, WMREG_RCTL, 0);
2472
2473 /* Release any queued transmit buffers. */
2474 for (i = 0; i < WM_TXQUEUELEN; i++) {
2475 txs = &sc->sc_txsoft[i];
2476 if (txs->txs_mbuf != NULL) {
2477 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2478 m_freem(txs->txs_mbuf);
2479 txs->txs_mbuf = NULL;
2480 }
2481 }
2482
2483 if (disable)
2484 wm_rxdrain(sc);
2485
2486 /* Mark the interface as down and cancel the watchdog timer. */
2487 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2488 ifp->if_timer = 0;
2489 }
2490
2491 /*
2492 * wm_acquire_eeprom:
2493 *
2494 * Perform the EEPROM handshake required on some chips.
2495 */
2496 static int
2497 wm_acquire_eeprom(struct wm_softc *sc)
2498 {
2499 uint32_t reg;
2500 int x;
2501
2502 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2503 reg = CSR_READ(sc, WMREG_EECD);
2504
2505 /* Request EEPROM access. */
2506 reg |= EECD_EE_REQ;
2507 CSR_WRITE(sc, WMREG_EECD, reg);
2508
2509 		/* ...and wait for it to be granted. */
2510 for (x = 0; x < 100; x++) {
2511 reg = CSR_READ(sc, WMREG_EECD);
2512 if (reg & EECD_EE_GNT)
2513 break;
2514 delay(5);
2515 }
2516 if ((reg & EECD_EE_GNT) == 0) {
2517 aprint_error("%s: could not acquire EEPROM GNT\n",
2518 sc->sc_dev.dv_xname);
2519 reg &= ~EECD_EE_REQ;
2520 CSR_WRITE(sc, WMREG_EECD, reg);
2521 return (1);
2522 }
2523 }
2524
2525 return (0);
2526 }
2527
2528 /*
2529 * wm_release_eeprom:
2530 *
2531 * Release the EEPROM mutex.
2532 */
2533 static void
2534 wm_release_eeprom(struct wm_softc *sc)
2535 {
2536 uint32_t reg;
2537
2538 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2539 reg = CSR_READ(sc, WMREG_EECD);
2540 reg &= ~EECD_EE_REQ;
2541 CSR_WRITE(sc, WMREG_EECD, reg);
2542 }
2543 }
2544
2545 /*
2546 * wm_eeprom_sendbits:
2547 *
2548 * Send a series of bits to the EEPROM.
2549 */
2550 static void
2551 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2552 {
2553 uint32_t reg;
2554 int x;
2555
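	/*
	 * Clock each bit out MSB-first: present it on DI, then pulse
	 * SK high and low, allowing 2us of settling time per phase.
	 */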
2556 reg = CSR_READ(sc, WMREG_EECD);
2557
2558 for (x = nbits; x > 0; x--) {
2559 if (bits & (1U << (x - 1)))
2560 reg |= EECD_DI;
2561 else
2562 reg &= ~EECD_DI;
2563 CSR_WRITE(sc, WMREG_EECD, reg);
2564 delay(2);
2565 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2566 delay(2);
2567 CSR_WRITE(sc, WMREG_EECD, reg);
2568 delay(2);
2569 }
2570 }
2571
2572 /*
2573 * wm_eeprom_recvbits:
2574 *
2575 * Receive a series of bits from the EEPROM.
2576 */
2577 static void
2578 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2579 {
2580 uint32_t reg, val;
2581 int x;
2582
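	/*
	 * Clock each bit in MSB-first: raise SK, let the EEPROM drive
	 * DO, sample it, then lower SK again.
	 */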
2583 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2584
2585 val = 0;
2586 for (x = nbits; x > 0; x--) {
2587 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2588 delay(2);
2589 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2590 val |= (1U << (x - 1));
2591 CSR_WRITE(sc, WMREG_EECD, reg);
2592 delay(2);
2593 }
2594 *valp = val;
2595 }
2596
2597 /*
2598 * wm_read_eeprom_uwire:
2599 *
2600  *	Read word(s) from the EEPROM using the MicroWire protocol.
2601 */
2602 static int
2603 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2604 {
2605 uint32_t reg, val;
2606 int i;
2607
2608 for (i = 0; i < wordcnt; i++) {
2609 /* Clear SK and DI. */
2610 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2611 CSR_WRITE(sc, WMREG_EECD, reg);
2612
2613 /* Set CHIP SELECT. */
2614 reg |= EECD_CS;
2615 CSR_WRITE(sc, WMREG_EECD, reg);
2616 delay(2);
2617
2618 /* Shift in the READ command. */
2619 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2620
2621 /* Shift in address. */
2622 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2623
2624 /* Shift out the data. */
2625 wm_eeprom_recvbits(sc, &val, 16);
2626 data[i] = val & 0xffff;
2627
2628 /* Clear CHIP SELECT. */
2629 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2630 CSR_WRITE(sc, WMREG_EECD, reg);
2631 delay(2);
2632 }
2633
2634 return (0);
2635 }
2636
2637 /*
2638 * wm_read_eeprom:
2639 *
2640 * Read data from the serial EEPROM.
2641 */
2642 static int
2643 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2644 {
2645 int rv;
2646
2647 if (wm_acquire_eeprom(sc))
2648 return (1);
2649
2650 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2651
2652 wm_release_eeprom(sc);
2653 return (rv);
2654 }
2655
2656 /*
2657 * wm_add_rxbuf:
2658 *
2659  *	Add a receive buffer to the indicated descriptor.
2660 */
2661 static int
2662 wm_add_rxbuf(struct wm_softc *sc, int idx)
2663 {
2664 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2665 struct mbuf *m;
2666 int error;
2667
2668 MGETHDR(m, M_DONTWAIT, MT_DATA);
2669 if (m == NULL)
2670 return (ENOBUFS);
2671
2672 MCLGET(m, M_DONTWAIT);
2673 if ((m->m_flags & M_EXT) == 0) {
2674 m_freem(m);
2675 return (ENOBUFS);
2676 }
2677
2678 if (rxs->rxs_mbuf != NULL)
2679 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2680
2681 rxs->rxs_mbuf = m;
2682
2683 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2684 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2685 BUS_DMA_READ|BUS_DMA_NOWAIT);
2686 if (error) {
2687 printf("%s: unable to load rx DMA map %d, error = %d\n",
2688 sc->sc_dev.dv_xname, idx, error);
2689 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2690 }
2691
2692 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2693 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2694
2695 WM_INIT_RXDESC(sc, idx);
2696
2697 return (0);
2698 }
2699
2700 /*
2701 * wm_set_ral:
2702 *
2703  *	Set an entry in the receive address list.
2704 */
2705 static void
2706 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2707 {
2708 uint32_t ral_lo, ral_hi;
2709
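	/*
	 * The station address is packed octet 0 first: octets 0-3 form
	 * RAL_LO and octets 4-5 the low half of RAL_HI, which also
	 * carries the Address Valid bit.  A NULL enaddr clears the slot.
	 */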
2710 if (enaddr != NULL) {
2711 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2712 (enaddr[3] << 24);
2713 ral_hi = enaddr[4] | (enaddr[5] << 8);
2714 ral_hi |= RAL_AV;
2715 } else {
2716 ral_lo = 0;
2717 ral_hi = 0;
2718 }
2719
2720 if (sc->sc_type >= WM_T_82544) {
2721 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2722 ral_lo);
2723 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2724 ral_hi);
2725 } else {
2726 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2727 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2728 }
2729 }
2730
2731 /*
2732 * wm_mchash:
2733 *
2734 * Compute the hash of the multicast address for the 4096-bit
2735 * multicast filter.
2736 */
2737 static uint32_t
2738 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2739 {
2740 static const int lo_shift[4] = { 4, 3, 2, 0 };
2741 static const int hi_shift[4] = { 4, 5, 6, 8 };
2742 uint32_t hash;
2743
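	/*
	 * The low 12 bits of the result index the 4096-bit multicast
	 * table; the shift tables pick which bits of the last two
	 * address octets contribute for each offset type.  E.g. with
	 * type 0, 01:00:5e:00:00:01 hashes to (0x00 >> 4) | (0x01 << 4)
	 * = 0x010: register 0, bit 16 of the table.
	 */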
2744 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2745 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2746
2747 return (hash & 0xfff);
2748 }
2749
2750 /*
2751 * wm_set_filter:
2752 *
2753 * Set up the receive filter.
2754 */
2755 static void
2756 wm_set_filter(struct wm_softc *sc)
2757 {
2758 struct ethercom *ec = &sc->sc_ethercom;
2759 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2760 struct ether_multi *enm;
2761 struct ether_multistep step;
2762 bus_addr_t mta_reg;
2763 uint32_t hash, reg, bit;
2764 int i;
2765
2766 if (sc->sc_type >= WM_T_82544)
2767 mta_reg = WMREG_CORDOVA_MTA;
2768 else
2769 mta_reg = WMREG_MTA;
2770
2771 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2772
2773 if (ifp->if_flags & IFF_BROADCAST)
2774 sc->sc_rctl |= RCTL_BAM;
2775 if (ifp->if_flags & IFF_PROMISC) {
2776 sc->sc_rctl |= RCTL_UPE;
2777 goto allmulti;
2778 }
2779
2780 /*
2781 * Set the station address in the first RAL slot, and
2782 * clear the remaining slots.
2783 */
2784 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2785 for (i = 1; i < WM_RAL_TABSIZE; i++)
2786 wm_set_ral(sc, NULL, i);
2787
2788 /* Clear out the multicast table. */
2789 for (i = 0; i < WM_MC_TABSIZE; i++)
2790 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2791
2792 ETHER_FIRST_MULTI(step, ec, enm);
2793 while (enm != NULL) {
2794 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2795 /*
2796 * We must listen to a range of multicast addresses.
2797 * For now, just accept all multicasts, rather than
2798 * trying to set only those filter bits needed to match
2799 * the range. (At this time, the only use of address
2800 * ranges is for IP multicast routing, for which the
2801 * range is big enough to require all bits set.)
2802 */
2803 goto allmulti;
2804 }
2805
2806 hash = wm_mchash(sc, enm->enm_addrlo);
2807
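		/*
		 * The 4096-bit table is spread across 128 32-bit
		 * registers: hash bits 11..5 select the register,
		 * bits 4..0 the bit within it.
		 */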
2808 reg = (hash >> 5) & 0x7f;
2809 bit = hash & 0x1f;
2810
2811 hash = CSR_READ(sc, mta_reg + (reg << 2));
2812 hash |= 1U << bit;
2813
2814 /* XXX Hardware bug?? */
2815 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2816 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2817 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2818 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2819 } else
2820 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2821
2822 ETHER_NEXT_MULTI(step, enm);
2823 }
2824
2825 ifp->if_flags &= ~IFF_ALLMULTI;
2826 goto setit;
2827
2828 allmulti:
2829 ifp->if_flags |= IFF_ALLMULTI;
2830 sc->sc_rctl |= RCTL_MPE;
2831
2832 setit:
2833 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2834 }
2835
2836 /*
2837 * wm_tbi_mediainit:
2838 *
2839 * Initialize media for use on 1000BASE-X devices.
2840 */
2841 static void
2842 wm_tbi_mediainit(struct wm_softc *sc)
2843 {
2844 const char *sep = "";
2845
2846 if (sc->sc_type < WM_T_82543)
2847 sc->sc_tipg = TIPG_WM_DFLT;
2848 else
2849 sc->sc_tipg = TIPG_LG_DFLT;
2850
2851 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2852 wm_tbi_mediastatus);
2853
2854 /*
2855 * SWD Pins:
2856 *
2857 * 0 = Link LED (output)
2858 * 1 = Loss Of Signal (input)
2859 */
2860 sc->sc_ctrl |= CTRL_SWDPIO(0);
2861 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2862
2863 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2864
2865 #define ADD(ss, mm, dd) \
2866 do { \
2867 printf("%s%s", sep, ss); \
2868 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2869 sep = ", "; \
2870 } while (/*CONSTCOND*/0)
2871
2872 printf("%s: ", sc->sc_dev.dv_xname);
2873 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2874 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2875 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2876 printf("\n");
2877
2878 #undef ADD
2879
2880 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2881 }
2882
2883 /*
2884 * wm_tbi_mediastatus: [ifmedia interface function]
2885 *
2886 * Get the current interface media status on a 1000BASE-X device.
2887 */
2888 static void
2889 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2890 {
2891 struct wm_softc *sc = ifp->if_softc;
2892
2893 ifmr->ifm_status = IFM_AVALID;
2894 ifmr->ifm_active = IFM_ETHER;
2895
2896 if (sc->sc_tbi_linkup == 0) {
2897 ifmr->ifm_active |= IFM_NONE;
2898 return;
2899 }
2900
2901 ifmr->ifm_status |= IFM_ACTIVE;
2902 ifmr->ifm_active |= IFM_1000_SX;
2903 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2904 ifmr->ifm_active |= IFM_FDX;
2905 }
2906
2907 /*
2908 * wm_tbi_mediachange: [ifmedia interface function]
2909 *
2910 * Set hardware to newly-selected media on a 1000BASE-X device.
2911 */
2912 static int
2913 wm_tbi_mediachange(struct ifnet *ifp)
2914 {
2915 struct wm_softc *sc = ifp->if_softc;
2916 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2917 uint32_t status;
2918 int i;
2919
2920 sc->sc_txcw = ife->ifm_data;
2921 if (sc->sc_ctrl & CTRL_RFCE)
2922 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2923 if (sc->sc_ctrl & CTRL_TFCE)
2924 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2925 sc->sc_txcw |= TXCW_ANE;
2926
2927 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2928 delay(10000);
2929
2930 sc->sc_tbi_anstate = 0;
2931
2932 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2933 /* Have signal; wait for the link to come up. */
2934 for (i = 0; i < 50; i++) {
2935 delay(10000);
2936 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2937 break;
2938 }
2939
2940 status = CSR_READ(sc, WMREG_STATUS);
2941 if (status & STATUS_LU) {
2942 /* Link is up. */
2943 DPRINTF(WM_DEBUG_LINK,
2944 ("%s: LINK: set media -> link up %s\n",
2945 sc->sc_dev.dv_xname,
2946 (status & STATUS_FD) ? "FDX" : "HDX"));
2947 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2948 if (status & STATUS_FD)
2949 sc->sc_tctl |=
2950 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2951 else
2952 sc->sc_tctl |=
2953 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2954 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2955 sc->sc_tbi_linkup = 1;
2956 } else {
2957 /* Link is down. */
2958 DPRINTF(WM_DEBUG_LINK,
2959 ("%s: LINK: set media -> link down\n",
2960 sc->sc_dev.dv_xname));
2961 sc->sc_tbi_linkup = 0;
2962 }
2963 } else {
2964 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2965 sc->sc_dev.dv_xname));
2966 sc->sc_tbi_linkup = 0;
2967 }
2968
2969 wm_tbi_set_linkled(sc);
2970
2971 return (0);
2972 }
2973
2974 /*
2975 * wm_tbi_set_linkled:
2976 *
2977 * Update the link LED on 1000BASE-X devices.
2978 */
2979 static void
2980 wm_tbi_set_linkled(struct wm_softc *sc)
2981 {
2982
2983 if (sc->sc_tbi_linkup)
2984 sc->sc_ctrl |= CTRL_SWDPIN(0);
2985 else
2986 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2987
2988 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2989 }
2990
2991 /*
2992 * wm_tbi_check_link:
2993 *
2994 * Check the link on 1000BASE-X devices.
2995 */
2996 static void
2997 wm_tbi_check_link(struct wm_softc *sc)
2998 {
2999 uint32_t rxcw, ctrl, status;
3000
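	/*
	 * sc_tbi_anstate is armed (set to 2) when a link status change
	 * or /C/ ordered sets are seen; we let one tick pass for
	 * autonegotiation to settle, perform the check on the next,
	 * and then disarm until the next event.
	 */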
3001 if (sc->sc_tbi_anstate == 0)
3002 return;
3003 else if (sc->sc_tbi_anstate > 1) {
3004 DPRINTF(WM_DEBUG_LINK,
3005 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3006 sc->sc_tbi_anstate));
3007 sc->sc_tbi_anstate--;
3008 return;
3009 }
3010
3011 sc->sc_tbi_anstate = 0;
3012
3013 rxcw = CSR_READ(sc, WMREG_RXCW);
3014 ctrl = CSR_READ(sc, WMREG_CTRL);
3015 status = CSR_READ(sc, WMREG_STATUS);
3016
3017 if ((status & STATUS_LU) == 0) {
3018 DPRINTF(WM_DEBUG_LINK,
3019 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3020 sc->sc_tbi_linkup = 0;
3021 } else {
3022 DPRINTF(WM_DEBUG_LINK,
3023 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3024 (status & STATUS_FD) ? "FDX" : "HDX"));
3025 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3026 if (status & STATUS_FD)
3027 sc->sc_tctl |=
3028 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3029 else
3030 sc->sc_tctl |=
3031 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3032 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3033 sc->sc_tbi_linkup = 1;
3034 }
3035
3036 wm_tbi_set_linkled(sc);
3037 }
3038
3039 /*
3040 * wm_gmii_reset:
3041 *
3042 * Reset the PHY.
3043 */
3044 static void
3045 wm_gmii_reset(struct wm_softc *sc)
3046 {
3047 uint32_t reg;
3048
3049 if (sc->sc_type >= WM_T_82544) {
3050 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3051 delay(20000);
3052
3053 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3054 delay(20000);
3055 } else {
3056 /* The PHY reset pin is active-low. */
3057 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3058 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3059 CTRL_EXT_SWDPIN(4));
3060 reg |= CTRL_EXT_SWDPIO(4);
3061
3062 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3063 delay(10);
3064
3065 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3066 delay(10);
3067
3068 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3069 delay(10);
3070 #if 0
3071 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3072 #endif
3073 }
3074 }
3075
3076 /*
3077 * wm_gmii_mediainit:
3078 *
3079 * Initialize media for use on 1000BASE-T devices.
3080 */
3081 static void
3082 wm_gmii_mediainit(struct wm_softc *sc)
3083 {
3084 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3085
3086 /* We have MII. */
3087 sc->sc_flags |= WM_F_HAS_MII;
3088
3089 sc->sc_tipg = TIPG_1000T_DFLT;
3090
3091 /*
3092 * Let the chip set speed/duplex on its own based on
3093 * signals from the PHY.
3094 */
3095 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3096 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3097
3098 /* Initialize our media structures and probe the GMII. */
3099 sc->sc_mii.mii_ifp = ifp;
3100
3101 if (sc->sc_type >= WM_T_82544) {
3102 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3103 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3104 } else {
3105 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3106 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3107 }
3108 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3109
3110 wm_gmii_reset(sc);
3111
3112 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3113 wm_gmii_mediastatus);
3114
3115 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3116 MII_OFFSET_ANY, 0);
3117 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3118 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3119 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3120 } else
3121 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3122 }
3123
3124 /*
3125 * wm_gmii_mediastatus: [ifmedia interface function]
3126 *
3127 * Get the current interface media status on a 1000BASE-T device.
3128 */
3129 static void
3130 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3131 {
3132 struct wm_softc *sc = ifp->if_softc;
3133
3134 mii_pollstat(&sc->sc_mii);
3135 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3136 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3137 }
3138
3139 /*
3140 * wm_gmii_mediachange: [ifmedia interface function]
3141 *
3142 * Set hardware to newly-selected media on a 1000BASE-T device.
3143 */
3144 static int
3145 wm_gmii_mediachange(struct ifnet *ifp)
3146 {
3147 struct wm_softc *sc = ifp->if_softc;
3148
3149 if (ifp->if_flags & IFF_UP)
3150 mii_mediachg(&sc->sc_mii);
3151 return (0);
3152 }
3153
3154 #define MDI_IO CTRL_SWDPIN(2)
3155 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3156 #define MDI_CLK CTRL_SWDPIN(3)
3157
3158 static void
3159 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3160 {
3161 uint32_t i, v;
3162
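	/*
	 * Bit-bang MDIO/MDC over the software-defined pins: make both
	 * outputs, then for each bit (MSB-first) set the data pin and
	 * pulse the clock, holding each phase for 10us.
	 */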
3163 v = CSR_READ(sc, WMREG_CTRL);
3164 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3165 v |= MDI_DIR | CTRL_SWDPIO(3);
3166
3167 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3168 if (data & i)
3169 v |= MDI_IO;
3170 else
3171 v &= ~MDI_IO;
3172 CSR_WRITE(sc, WMREG_CTRL, v);
3173 delay(10);
3174 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3175 delay(10);
3176 CSR_WRITE(sc, WMREG_CTRL, v);
3177 delay(10);
3178 }
3179 }
3180
3181 static uint32_t
3182 i82543_mii_recvbits(struct wm_softc *sc)
3183 {
3184 uint32_t v, i, data = 0;
3185
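	/*
	 * Let the PHY drive MDIO (only the clock pin stays an output),
	 * clock through the turnaround bit, shift in 16 data bits
	 * MSB-first, then clock one trailing idle bit.
	 */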
3186 v = CSR_READ(sc, WMREG_CTRL);
3187 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3188 v |= CTRL_SWDPIO(3);
3189
3190 CSR_WRITE(sc, WMREG_CTRL, v);
3191 delay(10);
3192 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3193 delay(10);
3194 CSR_WRITE(sc, WMREG_CTRL, v);
3195 delay(10);
3196
3197 for (i = 0; i < 16; i++) {
3198 data <<= 1;
3199 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3200 delay(10);
3201 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3202 data |= 1;
3203 CSR_WRITE(sc, WMREG_CTRL, v);
3204 delay(10);
3205 }
3206
3207 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3208 delay(10);
3209 CSR_WRITE(sc, WMREG_CTRL, v);
3210 delay(10);
3211
3212 return (data);
3213 }
3214
3215 #undef MDI_IO
3216 #undef MDI_DIR
3217 #undef MDI_CLK
3218
3219 /*
3220 * wm_gmii_i82543_readreg: [mii interface function]
3221 *
3222 * Read a PHY register on the GMII (i82543 version).
3223 */
3224 static int
3225 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3226 {
3227 struct wm_softc *sc = (void *) self;
3228 int rv;
3229
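	/*
	 * Bit-bang a standard MII management read frame: a 32-bit
	 * preamble of ones, then 14 bits of start, opcode, PHY and
	 * register address; the turnaround and 16 data bits are
	 * clocked in by i82543_mii_recvbits().
	 */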
3230 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3231 i82543_mii_sendbits(sc, reg | (phy << 5) |
3232 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3233 rv = i82543_mii_recvbits(sc) & 0xffff;
3234
3235 DPRINTF(WM_DEBUG_GMII,
3236 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3237 sc->sc_dev.dv_xname, phy, reg, rv));
3238
3239 return (rv);
3240 }
3241
3242 /*
3243 * wm_gmii_i82543_writereg: [mii interface function]
3244 *
3245 * Write a PHY register on the GMII (i82543 version).
3246 */
3247 static void
3248 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3249 {
3250 struct wm_softc *sc = (void *) self;
3251
3252 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3253 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3254 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3255 (MII_COMMAND_START << 30), 32);
3256 }
3257
3258 /*
3259 * wm_gmii_i82544_readreg: [mii interface function]
3260 *
3261 * Read a PHY register on the GMII.
3262 */
3263 static int
3264 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3265 {
3266 struct wm_softc *sc = (void *) self;
3267 uint32_t mdic;
3268 int i, rv;
3269
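	/*
	 * i82544 and later run the MII management frame in hardware via
	 * the MDIC register: start the read, then poll READY for up to
	 * 1ms (100 * 10us).
	 */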
3270 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3271 MDIC_REGADD(reg));
3272
3273 for (i = 0; i < 100; i++) {
3274 mdic = CSR_READ(sc, WMREG_MDIC);
3275 if (mdic & MDIC_READY)
3276 break;
3277 delay(10);
3278 }
3279
3280 if ((mdic & MDIC_READY) == 0) {
3281 printf("%s: MDIC read timed out: phy %d reg %d\n",
3282 sc->sc_dev.dv_xname, phy, reg);
3283 rv = 0;
3284 } else if (mdic & MDIC_E) {
3285 #if 0 /* This is normal if no PHY is present. */
3286 printf("%s: MDIC read error: phy %d reg %d\n",
3287 sc->sc_dev.dv_xname, phy, reg);
3288 #endif
3289 rv = 0;
3290 } else {
3291 rv = MDIC_DATA(mdic);
3292 if (rv == 0xffff)
3293 rv = 0;
3294 }
3295
3296 return (rv);
3297 }
3298
3299 /*
3300 * wm_gmii_i82544_writereg: [mii interface function]
3301 *
3302 * Write a PHY register on the GMII.
3303 */
3304 static void
3305 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3306 {
3307 struct wm_softc *sc = (void *) self;
3308 uint32_t mdic;
3309 int i;
3310
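	/* As in the read path: start the write, then poll MDIC READY. */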
3311 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3312 MDIC_REGADD(reg) | MDIC_DATA(val));
3313
3314 for (i = 0; i < 100; i++) {
3315 mdic = CSR_READ(sc, WMREG_MDIC);
3316 if (mdic & MDIC_READY)
3317 break;
3318 delay(10);
3319 }
3320
3321 if ((mdic & MDIC_READY) == 0)
3322 printf("%s: MDIC write timed out: phy %d reg %d\n",
3323 sc->sc_dev.dv_xname, phy, reg);
3324 else if (mdic & MDIC_E)
3325 printf("%s: MDIC write error: phy %d reg %d\n",
3326 sc->sc_dev.dv_xname, phy, reg);
3327 }
3328
3329 /*
3330 * wm_gmii_statchg: [mii interface function]
3331 *
3332 * Callback from MII layer when media changes.
3333 */
3334 static void
3335 wm_gmii_statchg(struct device *self)
3336 {
3337 struct wm_softc *sc = (void *) self;
3338
3339 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3340
3341 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3342 DPRINTF(WM_DEBUG_LINK,
3343 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3344 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3345 } else {
3346 DPRINTF(WM_DEBUG_LINK,
3347 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3348 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3349 }
3350
3351 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3352 }
3353