/*	$NetBSD: if_wm.c,v 1.63 2003/11/22 08:32:12 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.63 2003/11/22 08:32:12 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
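
#if 0	/* Illustrative sketch (not compiled): ring index arithmetic. */
/*
 * Because WM_NTXDESC is a power of two, WM_NEXTTX() can wrap with a
 * mask instead of a modulus: WM_NEXTTX(255) == (255 + 1) & 0xff == 0.
 * The same masking computes, e.g., how many descriptors are in use;
 * the helper below is a hypothetical example, not part of the driver.
 */
static int
wm_txdescs_inuse(int producer, int consumer)
{

	/* Distance from consumer to producer, handling wrap. */
	return ((producer - consumer) & WM_NTXDESC_MASK);
}
#endif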

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
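
#if 0	/* Illustrative sketch (not compiled): the tail-pointer idiom. */
/*
 * sc_rxtailp always points at the word that holds the chain's tail
 * link (initially &sc_rxhead), so WM_RXCHAIN_LINK() appends in O(1)
 * with no special case for an empty chain.  A stand-alone version of
 * the same idiom, with hypothetical names:
 */
struct node { struct node *next; };
static struct node *head, **tailp = &head;

static void
append(struct node *n)
{

	*tailp = n;		/* link onto tail (or head if empty) */
	tailp = &n->next;	/* remember where the new tail's link lives */
}
#endif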

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
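
/*
 * Example: WM_CDTXSYNC(sc, 254, 4, ops) on the 256-entry ring issues
 * two bus_dmamap_sync() calls, one covering descriptors 254-255 and
 * one covering descriptors 0-1 after the wrap.
 */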

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
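
/*
 * Example: with sc_align_tweak == 2, the 14-byte Ethernet header
 * occupies buffer offsets 2-15, so the IP header that follows starts
 * at offset 16, a 4-byte boundary; with a tweak of 0 it would start
 * at the misaligned offset 14.
 */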

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
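
/*
 * Hypothetical usage sketch: a chip-erratum workaround could re-issue
 * a register write through I/O space, after checking that the I/O
 * handle was successfully mapped at attach time:
 *
 *	if (sc->sc_flags & WM_F_IOH_VALID)
 *		wm_io_write(sc, WMREG_TDT, nexttx);
 */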

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			     0, &sc->sc_iot, &sc->sc_ioh,
			     NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
					       PCI_CAP_PCIX,
					       &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	     sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	     0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	     sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	     0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	     sizeof(struct wm_control_data), 1,
	     sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	     sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	     0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		     WM_NTXSEGS, MCLBYTES, 0, 0,
		     &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		     MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);
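	/*
	 * For example, 6 address bits describe a 1U << 6 == 64-word
	 * MicroWire part, while a 16-bit SPI part holds 65536 words.
	 */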

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
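	/*
	 * Worked example: for an untagged IPv4/TCP frame with a 20-byte
	 * IP header, offset == 14, so IPCSS == 14, IPCSO == 24 (ip_sum)
	 * and IPCSE == 33; then TUCSS == 34, TUCSO == 50 (th_sum) and
	 * TUCSE == 0 (checksum to the end of the packet).
	 */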

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
1747 wm_txintr(sc);
1748
1749 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1750 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1751 wm_linkintr(sc, icr);
1752 }
1753
1754 if (icr & ICR_RXO) {
1755 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1756 wantinit = 1;
1757 }
1758 }
1759
1760 if (handled) {
1761 if (wantinit)
1762 wm_init(ifp);
1763
1764 /* Try to get more packets going. */
1765 wm_start(ifp);
1766 }
1767
1768 return (handled);
1769 }
1770
1771 /*
1772 * wm_txintr:
1773 *
1774 * Helper; handle transmit interrupts.
1775 */
1776 static void
1777 wm_txintr(struct wm_softc *sc)
1778 {
1779 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1780 struct wm_txsoft *txs;
1781 uint8_t status;
1782 int i;
1783
1784 ifp->if_flags &= ~IFF_OACTIVE;
1785
1786 /*
1787 * Go through the Tx list and free mbufs for those
1788 * frames which have been transmitted.
1789 */
1790 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1791 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1792 txs = &sc->sc_txsoft[i];
1793
1794 DPRINTF(WM_DEBUG_TX,
1795 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1796
1797 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1798 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1799
1800 status = le32toh(sc->sc_txdescs[
1801 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1802 if ((status & WTX_ST_DD) == 0) {
1803 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1804 BUS_DMASYNC_PREREAD);
1805 break;
1806 }
1807
1808 DPRINTF(WM_DEBUG_TX,
1809 ("%s: TX: job %d done: descs %d..%d\n",
1810 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1811 txs->txs_lastdesc));
1812
1813 /*
1814 * XXX We should probably be using the statistics
1815 * XXX registers, but I don't know if they exist
1816 * XXX on chips before the i82544.
1817 */
1818
1819 #ifdef WM_EVENT_COUNTERS
1820 if (status & WTX_ST_TU)
1821 WM_EVCNT_INCR(&sc->sc_ev_tu);
1822 #endif /* WM_EVENT_COUNTERS */
1823
1824 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1825 ifp->if_oerrors++;
1826 if (status & WTX_ST_LC)
1827 printf("%s: late collision\n",
1828 sc->sc_dev.dv_xname);
1829 else if (status & WTX_ST_EC) {
1830 ifp->if_collisions += 16;
1831 printf("%s: excessive collisions\n",
1832 sc->sc_dev.dv_xname);
1833 }
1834 } else
1835 ifp->if_opackets++;
1836
1837 sc->sc_txfree += txs->txs_ndesc;
1838 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1839 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1840 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1841 m_freem(txs->txs_mbuf);
1842 txs->txs_mbuf = NULL;
1843 }
1844
1845 /* Update the dirty transmit buffer pointer. */
1846 sc->sc_txsdirty = i;
1847 DPRINTF(WM_DEBUG_TX,
1848 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1849
1850 /*
1851 * If there are no more pending transmissions, cancel the watchdog
1852 * timer.
1853 */
1854 if (sc->sc_txsfree == WM_TXQUEUELEN)
1855 ifp->if_timer = 0;
1856 }
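
/*
 * Illustrative sketch (not driver code): the shape of the job-ring
 * bookkeeping wm_txintr() performs above.  "job_done" is a hypothetical
 * stand-in for the WTX_ST_DD test on the job's last descriptor;
 * WM_NEXTTXS() and WM_TXQUEUELEN are the real macros used above.
 */
#if 0 /* example only */
	while (sc->sc_txsfree != WM_TXQUEUELEN &&
	    job_done(&sc->sc_txsoft[sc->sc_txsdirty])) {
		/* Reclaim one job: advance dirty index, grow free count. */
		sc->sc_txsdirty = WM_NEXTTXS(sc->sc_txsdirty);
		sc->sc_txsfree++;
	}
#endif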
1857
1858 /*
1859 * wm_rxintr:
1860 *
1861 * Helper; handle receive interrupts.
1862 */
1863 static void
1864 wm_rxintr(struct wm_softc *sc)
1865 {
1866 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1867 struct wm_rxsoft *rxs;
1868 struct mbuf *m;
1869 int i, len;
1870 uint8_t status, errors;
1871
1872 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1873 rxs = &sc->sc_rxsoft[i];
1874
1875 DPRINTF(WM_DEBUG_RX,
1876 ("%s: RX: checking descriptor %d\n",
1877 sc->sc_dev.dv_xname, i));
1878
1879 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1880
1881 status = sc->sc_rxdescs[i].wrx_status;
1882 errors = sc->sc_rxdescs[i].wrx_errors;
1883 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1884
1885 if ((status & WRX_ST_DD) == 0) {
1886 /*
1887 * We have processed all of the receive descriptors.
1888 */
1889 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1890 break;
1891 }
1892
1893 if (__predict_false(sc->sc_rxdiscard)) {
1894 DPRINTF(WM_DEBUG_RX,
1895 ("%s: RX: discarding contents of descriptor %d\n",
1896 sc->sc_dev.dv_xname, i));
1897 WM_INIT_RXDESC(sc, i);
1898 if (status & WRX_ST_EOP) {
1899 /* Reset our state. */
1900 DPRINTF(WM_DEBUG_RX,
1901 ("%s: RX: resetting rxdiscard -> 0\n",
1902 sc->sc_dev.dv_xname));
1903 sc->sc_rxdiscard = 0;
1904 }
1905 continue;
1906 }
1907
1908 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1909 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1910
1911 m = rxs->rxs_mbuf;
1912
1913 /*
1914 * Add a new receive buffer to the ring.
1915 */
1916 if (wm_add_rxbuf(sc, i) != 0) {
1917 /*
1918 * Failed, throw away what we've done so
1919 * far, and discard the rest of the packet.
1920 */
1921 ifp->if_ierrors++;
1922 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1923 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1924 WM_INIT_RXDESC(sc, i);
1925 if ((status & WRX_ST_EOP) == 0)
1926 sc->sc_rxdiscard = 1;
1927 if (sc->sc_rxhead != NULL)
1928 m_freem(sc->sc_rxhead);
1929 WM_RXCHAIN_RESET(sc);
1930 DPRINTF(WM_DEBUG_RX,
1931 ("%s: RX: Rx buffer allocation failed, "
1932 "dropping packet%s\n", sc->sc_dev.dv_xname,
1933 sc->sc_rxdiscard ? " (discard)" : ""));
1934 continue;
1935 }
1936
1937 WM_RXCHAIN_LINK(sc, m);
1938
1939 m->m_len = len;
1940
1941 DPRINTF(WM_DEBUG_RX,
1942 ("%s: RX: buffer at %p len %d\n",
1943 sc->sc_dev.dv_xname, m->m_data, len));
1944
1945 /*
1946 * If this is not the end of the packet, keep
1947 * looking.
1948 */
1949 if ((status & WRX_ST_EOP) == 0) {
1950 sc->sc_rxlen += len;
1951 DPRINTF(WM_DEBUG_RX,
1952 ("%s: RX: not yet EOP, rxlen -> %d\n",
1953 sc->sc_dev.dv_xname, sc->sc_rxlen));
1954 continue;
1955 }
1956
1957 /*
1958 * Okay, we have the entire packet now...
1959 */
1960 *sc->sc_rxtailp = NULL;
1961 m = sc->sc_rxhead;
1962 len += sc->sc_rxlen;
1963
1964 WM_RXCHAIN_RESET(sc);
1965
1966 DPRINTF(WM_DEBUG_RX,
1967 ("%s: RX: have entire packet, len -> %d\n",
1968 sc->sc_dev.dv_xname, len));
1969
1970 /*
1971 * If an error occurred, update stats and drop the packet.
1972 */
1973 if (errors &
1974 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1975 ifp->if_ierrors++;
1976 if (errors & WRX_ER_SE)
1977 printf("%s: symbol error\n",
1978 sc->sc_dev.dv_xname);
1979 else if (errors & WRX_ER_SEQ)
1980 printf("%s: receive sequence error\n",
1981 sc->sc_dev.dv_xname);
1982 else if (errors & WRX_ER_CE)
1983 printf("%s: CRC error\n",
1984 sc->sc_dev.dv_xname);
1985 m_freem(m);
1986 continue;
1987 }
1988
1989 /*
1990 * No errors. Receive the packet.
1991 *
1992 * Note, we have configured the chip to include the
1993 * CRC with every packet.
1994 */
1995 m->m_flags |= M_HASFCS;
1996 m->m_pkthdr.rcvif = ifp;
1997 m->m_pkthdr.len = len;
1998
1999 #if 0 /* XXXJRT */
2000 /*
2001 * If VLANs are enabled, VLAN packets have been unwrapped
2002 * for us. Associate the tag with the packet.
2003 */
2004 if (sc->sc_ethercom.ec_nvlans != 0 &&
2005 (status & WRX_ST_VP) != 0) {
2006 struct m_tag *vtag;
2007
2008 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2009 M_NOWAIT);
2010 if (vtag == NULL) {
2011 ifp->if_ierrors++;
2012 printf("%s: unable to allocate VLAN tag\n",
2013 sc->sc_dev.dv_xname);
2014 m_freem(m);
2015 continue;
2016 }
2017
2018 *(u_int *)(vtag + 1) =
2019 le16toh(sc->sc_rxdescs[i].wrx_special);
2020 }
2021 #endif /* XXXJRT */
2022
2023 /*
2024 * Set up checksum info for this packet.
2025 */
2026 if (status & WRX_ST_IPCS) {
2027 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2028 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2029 if (errors & WRX_ER_IPE)
2030 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2031 }
2032 if (status & WRX_ST_TCPCS) {
2033 /*
2034 * Note: we don't know if this was TCP or UDP,
2035 * so we just set both bits, and expect the
2036 * upper layers to deal.
2037 */
2038 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2039 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2040 if (errors & WRX_ER_TCPE)
2041 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2042 }
2043
2044 ifp->if_ipackets++;
2045
2046 #if NBPFILTER > 0
2047 /* Pass this up to any BPF listeners. */
2048 if (ifp->if_bpf)
2049 bpf_mtap(ifp->if_bpf, m);
2050 #endif /* NBPFILTER > 0 */
2051
2052 /* Pass it on. */
2053 (*ifp->if_input)(ifp, m);
2054 }
2055
2056 /* Update the receive pointer. */
2057 sc->sc_rxptr = i;
2058
2059 DPRINTF(WM_DEBUG_RX,
2060 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2061 }
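
/*
 * Worked example (illustrative): a 4500-byte frame arriving in
 * 2048-byte clusters spans three descriptors.  The first two are not
 * EOP, so sc_rxlen accumulates 2048 and then 4096; the third carries
 * len = 404 with WRX_ST_EOP set, and the code above computes the
 * full packet length as 404 + 4096 = 4500.
 */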
2062
2063 /*
2064 * wm_linkintr:
2065 *
2066 * Helper; handle link interrupts.
2067 */
2068 static void
2069 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2070 {
2071 uint32_t status;
2072
2073 /*
2074 * If we get a link status interrupt on a 1000BASE-T
2075 * device, just fall into the normal MII tick path.
2076 */
2077 if (sc->sc_flags & WM_F_HAS_MII) {
2078 if (icr & ICR_LSC) {
2079 DPRINTF(WM_DEBUG_LINK,
2080 ("%s: LINK: LSC -> mii_tick\n",
2081 sc->sc_dev.dv_xname));
2082 mii_tick(&sc->sc_mii);
2083 } else if (icr & ICR_RXSEQ) {
2084 DPRINTF(WM_DEBUG_LINK,
2085 ("%s: LINK Receive sequence error\n",
2086 sc->sc_dev.dv_xname));
2087 }
2088 return;
2089 }
2090
2091 /*
2092 * If we are now receiving /C/, check for link again in
2093 * a couple of link clock ticks.
2094 */
2095 if (icr & ICR_RXCFG) {
2096 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2097 sc->sc_dev.dv_xname));
2098 sc->sc_tbi_anstate = 2;
2099 }
2100
2101 if (icr & ICR_LSC) {
2102 status = CSR_READ(sc, WMREG_STATUS);
2103 if (status & STATUS_LU) {
2104 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2105 sc->sc_dev.dv_xname,
2106 (status & STATUS_FD) ? "FDX" : "HDX"));
2107 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2108 if (status & STATUS_FD)
2109 sc->sc_tctl |=
2110 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2111 else
2112 sc->sc_tctl |=
2113 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2114 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2115 sc->sc_tbi_linkup = 1;
2116 } else {
2117 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2118 sc->sc_dev.dv_xname));
2119 sc->sc_tbi_linkup = 0;
2120 }
2121 sc->sc_tbi_anstate = 2;
2122 wm_tbi_set_linkled(sc);
2123 } else if (icr & ICR_RXSEQ) {
2124 DPRINTF(WM_DEBUG_LINK,
2125 ("%s: LINK: Receive sequence error\n",
2126 sc->sc_dev.dv_xname));
2127 }
2128 }
2129
2130 /*
2131 * wm_tick:
2132 *
2133 * One second timer, currently used only to check the link
2134 * status (MII tick or TBI link check).
2135 */
2136 static void
2137 wm_tick(void *arg)
2138 {
2139 struct wm_softc *sc = arg;
2140 int s;
2141
2142 s = splnet();
2143
2144 if (sc->sc_flags & WM_F_HAS_MII)
2145 mii_tick(&sc->sc_mii);
2146 else
2147 wm_tbi_check_link(sc);
2148
2149 splx(s);
2150
2151 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2152 }
2153
2154 /*
2155 * wm_reset:
2156 *
2157 * Reset the chip to a known state.
2158 */
2159 static void
2160 wm_reset(struct wm_softc *sc)
2161 {
2162 int i;
2163
2164 switch (sc->sc_type) {
2165 case WM_T_82544:
2166 case WM_T_82540:
2167 case WM_T_82545:
2168 case WM_T_82546:
2169 case WM_T_82541:
2170 case WM_T_82541_2:
2171 /*
2172 * These chips have a problem with the memory-mapped
2173 * write cycle when issuing the reset, so use I/O-mapped
2174 * access, if possible.
2175 */
2176 if (sc->sc_flags & WM_F_IOH_VALID)
2177 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2178 else
2179 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2180 break;
2181
2182 case WM_T_82545_3:
2183 case WM_T_82546_3:
2184 /* Use the shadow control register on these chips. */
2185 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2186 break;
2187
2188 default:
2189 /* Everything else can safely use the documented method. */
2190 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2191 break;
2192 }
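	/*
	 * Give the reset ~10ms to settle, then poll the RST bit for up
	 * to another 20ms (1000 iterations of 20us) before complaining.
	 */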
2193 delay(10000);
2194
2195 for (i = 0; i < 1000; i++) {
2196 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2197 return;
2198 delay(20);
2199 }
2200
2201 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2202 printf("%s: WARNING: reset failed to complete\n",
2203 sc->sc_dev.dv_xname);
2204 }
2205
2206 /*
2207 * wm_init: [ifnet interface function]
2208 *
2209 * Initialize the interface. Must be called at splnet().
2210 */
2211 static int
2212 wm_init(struct ifnet *ifp)
2213 {
2214 struct wm_softc *sc = ifp->if_softc;
2215 struct wm_rxsoft *rxs;
2216 int i, error = 0;
2217 uint32_t reg;
2218
2219 /*
2220 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2221 * On such platforms there is a small but measurable benefit, at
2222 * the normal MTU, to avoiding the adjustment of the descriptor so
2223 * that the headers are aligned. One possibility is that the DMA
2224 * itself is slightly more efficient if the front of the entire
2225 * packet (instead of the front of the headers) is aligned.
2226 *
2227 * Note we must always set align_tweak to 0 if we are using
2228 * jumbo frames.
2229 */
2230 #ifdef __NO_STRICT_ALIGNMENT
2231 sc->sc_align_tweak = 0;
2232 #else
2233 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2234 sc->sc_align_tweak = 0;
2235 else
2236 sc->sc_align_tweak = 2;
2237 #endif /* __NO_STRICT_ALIGNMENT */
2238
2239 /* Cancel any pending I/O. */
2240 wm_stop(ifp, 0);
2241
2242 /* Reset the chip to a known state. */
2243 wm_reset(sc);
2244
2245 /* Initialize the transmit descriptor ring. */
2246 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2247 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2248 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2249 sc->sc_txfree = WM_NTXDESC;
2250 sc->sc_txnext = 0;
2251
2252 sc->sc_txctx_ipcs = 0xffffffff;
2253 sc->sc_txctx_tucs = 0xffffffff;
2254
2255 if (sc->sc_type < WM_T_82543) {
2256 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2257 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2258 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2259 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2260 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2261 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2262 } else {
2263 CSR_WRITE(sc, WMREG_TBDAH, 0);
2264 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2265 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2266 CSR_WRITE(sc, WMREG_TDH, 0);
2267 CSR_WRITE(sc, WMREG_TDT, 0);
2268 CSR_WRITE(sc, WMREG_TIDV, 128);
2269
2270 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2271 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2272 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2273 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2274 }
2275 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2276 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2277
2278 /* Initialize the transmit job descriptors. */
2279 for (i = 0; i < WM_TXQUEUELEN; i++)
2280 sc->sc_txsoft[i].txs_mbuf = NULL;
2281 sc->sc_txsfree = WM_TXQUEUELEN;
2282 sc->sc_txsnext = 0;
2283 sc->sc_txsdirty = 0;
2284
2285 /*
2286 * Initialize the receive descriptor and receive job
2287 * descriptor rings.
2288 */
2289 if (sc->sc_type < WM_T_82543) {
2290 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2291 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2292 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2293 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2294 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2295 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2296
2297 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2298 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2299 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2300 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2301 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2302 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2303 } else {
2304 CSR_WRITE(sc, WMREG_RDBAH, 0);
2305 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2306 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2307 CSR_WRITE(sc, WMREG_RDH, 0);
2308 CSR_WRITE(sc, WMREG_RDT, 0);
2309 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2310 }
2311 for (i = 0; i < WM_NRXDESC; i++) {
2312 rxs = &sc->sc_rxsoft[i];
2313 if (rxs->rxs_mbuf == NULL) {
2314 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2315 printf("%s: unable to allocate or map rx "
2316 "buffer %d, error = %d\n",
2317 sc->sc_dev.dv_xname, i, error);
2318 /*
2319 * XXX Should attempt to run with fewer receive
2320 * XXX buffers instead of just failing.
2321 */
2322 wm_rxdrain(sc);
2323 goto out;
2324 }
2325 } else
2326 WM_INIT_RXDESC(sc, i);
2327 }
2328 sc->sc_rxptr = 0;
2329 sc->sc_rxdiscard = 0;
2330 WM_RXCHAIN_RESET(sc);
2331
2332 /*
2333 * Clear out the VLAN table -- we don't use it (yet).
2334 */
2335 CSR_WRITE(sc, WMREG_VET, 0);
2336 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2337 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2338
2339 /*
2340 * Set up flow-control parameters.
2341 *
2342 * XXX Values could probably stand some tuning.
2343 */
2344 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2345 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2346 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2347 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2348
2349 if (sc->sc_type < WM_T_82543) {
2350 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2351 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2352 } else {
2353 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2354 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2355 }
2356 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2357 }
2358
2359 #if 0 /* XXXJRT */
2360 /* Deal with VLAN enables. */
2361 if (sc->sc_ethercom.ec_nvlans != 0)
2362 sc->sc_ctrl |= CTRL_VME;
2363 else
2364 #endif /* XXXJRT */
2365 sc->sc_ctrl &= ~CTRL_VME;
2366
2367 /* Write the control registers. */
2368 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2369 #if 0
2370 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2371 #endif
2372
2373 /*
2374 * Set up checksum offload parameters.
2375 */
2376 reg = CSR_READ(sc, WMREG_RXCSUM);
2377 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2378 reg |= RXCSUM_IPOFL;
2379 else
2380 reg &= ~RXCSUM_IPOFL;
2381 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2382 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2383 else {
2384 reg &= ~RXCSUM_TUOFL;
2385 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2386 reg &= ~RXCSUM_IPOFL;
2387 }
2388 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2389
2390 /*
2391 * Set up the interrupt registers.
2392 */
2393 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2394 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2395 ICR_RXO | ICR_RXT0;
2396 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2397 sc->sc_icr |= ICR_RXCFG;
2398 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2399
2400 /* Set up the inter-packet gap. */
2401 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2402
2403 #if 0 /* XXXJRT */
2404 /* Set the VLAN ethernetype. */
2405 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2406 #endif
2407
2408 /*
2409 * Set up the transmit control register; we start out with
2410 * a collision distance suitable for FDX, but update it when
2411 * we resolve the media type.
2412 */
2413 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2414 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2415 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2416
2417 /* Set the media. */
2418 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2419
2420 /*
2421 * Set up the receive control register; we actually program
2422 * the register when we set the receive filter. Use multicast
2423 * address offset type 0.
2424 *
2425 * Only the i82544 has the ability to strip the incoming
2426 * CRC, so we don't enable that feature.
2427 */
2428 sc->sc_mchash_type = 0;
2429 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2430 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2431
2432 if (MCLBYTES == 2048) {
2433 sc->sc_rctl |= RCTL_2k;
2434 } else {
2435 /*
2436 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2437 * XXX segments, dropping" -- why?
2438 */
2439 #if 0
2440 if (sc->sc_type >= WM_T_82543) {
2441 switch (MCLBYTES) {
2442 case 4096:
2443 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2444 break;
2445 case 8192:
2446 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2447 break;
2448 case 16384:
2449 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2450 break;
2451 default:
2452 panic("wm_init: MCLBYTES %d unsupported",
2453 MCLBYTES);
2454 break;
2455 }
2456 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2457 #else
2458 panic("wm_init: MCLBYTES > 2048 not supported.");
2459 #endif
2460 }
2461
2462 /* Set the receive filter. */
2463 wm_set_filter(sc);
2464
2465 /* Start the one second link check clock. */
2466 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2467
2468 /* ...all done! */
2469 ifp->if_flags |= IFF_RUNNING;
2470 ifp->if_flags &= ~IFF_OACTIVE;
2471
2472 out:
2473 if (error)
2474 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2475 return (error);
2476 }
2477
2478 /*
2479 * wm_rxdrain:
2480 *
2481 * Drain the receive queue.
2482 */
2483 static void
2484 wm_rxdrain(struct wm_softc *sc)
2485 {
2486 struct wm_rxsoft *rxs;
2487 int i;
2488
2489 for (i = 0; i < WM_NRXDESC; i++) {
2490 rxs = &sc->sc_rxsoft[i];
2491 if (rxs->rxs_mbuf != NULL) {
2492 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2493 m_freem(rxs->rxs_mbuf);
2494 rxs->rxs_mbuf = NULL;
2495 }
2496 }
2497 }
2498
2499 /*
2500 * wm_stop: [ifnet interface function]
2501 *
2502 * Stop transmission on the interface.
2503 */
2504 static void
2505 wm_stop(struct ifnet *ifp, int disable)
2506 {
2507 struct wm_softc *sc = ifp->if_softc;
2508 struct wm_txsoft *txs;
2509 int i;
2510
2511 /* Stop the one second clock. */
2512 callout_stop(&sc->sc_tick_ch);
2513
2514 if (sc->sc_flags & WM_F_HAS_MII) {
2515 /* Down the MII. */
2516 mii_down(&sc->sc_mii);
2517 }
2518
2519 /* Stop the transmit and receive processes. */
2520 CSR_WRITE(sc, WMREG_TCTL, 0);
2521 CSR_WRITE(sc, WMREG_RCTL, 0);
2522
2523 /* Release any queued transmit buffers. */
2524 for (i = 0; i < WM_TXQUEUELEN; i++) {
2525 txs = &sc->sc_txsoft[i];
2526 if (txs->txs_mbuf != NULL) {
2527 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2528 m_freem(txs->txs_mbuf);
2529 txs->txs_mbuf = NULL;
2530 }
2531 }
2532
2533 if (disable)
2534 wm_rxdrain(sc);
2535
2536 /* Mark the interface as down and cancel the watchdog timer. */
2537 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2538 ifp->if_timer = 0;
2539 }
2540
2541 /*
2542 * wm_acquire_eeprom:
2543 *
2544 * Perform the EEPROM handshake required on some chips.
2545 */
2546 static int
2547 wm_acquire_eeprom(struct wm_softc *sc)
2548 {
2549 uint32_t reg;
2550 int x;
2551
2552 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2553 reg = CSR_READ(sc, WMREG_EECD);
2554
2555 /* Request EEPROM access. */
2556 reg |= EECD_EE_REQ;
2557 CSR_WRITE(sc, WMREG_EECD, reg);
2558
2559 /* ...and wait for it to be granted. */
2560 for (x = 0; x < 100; x++) {
2561 reg = CSR_READ(sc, WMREG_EECD);
2562 if (reg & EECD_EE_GNT)
2563 break;
2564 delay(5);
2565 }
2566 if ((reg & EECD_EE_GNT) == 0) {
2567 aprint_error("%s: could not acquire EEPROM GNT\n",
2568 sc->sc_dev.dv_xname);
2569 reg &= ~EECD_EE_REQ;
2570 CSR_WRITE(sc, WMREG_EECD, reg);
2571 return (1);
2572 }
2573 }
2574
2575 return (0);
2576 }
2577
2578 /*
2579 * wm_release_eeprom:
2580 *
2581 * Release the EEPROM mutex.
2582 */
2583 static void
2584 wm_release_eeprom(struct wm_softc *sc)
2585 {
2586 uint32_t reg;
2587
2588 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2589 reg = CSR_READ(sc, WMREG_EECD);
2590 reg &= ~EECD_EE_REQ;
2591 CSR_WRITE(sc, WMREG_EECD, reg);
2592 }
2593 }
2594
2595 /*
2596 * wm_eeprom_sendbits:
2597 *
2598 * Send a series of bits to the EEPROM.
2599 */
2600 static void
2601 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2602 {
2603 uint32_t reg;
2604 int x;
2605
2606 reg = CSR_READ(sc, WMREG_EECD);
2607
2608 for (x = nbits; x > 0; x--) {
2609 if (bits & (1U << (x - 1)))
2610 reg |= EECD_DI;
2611 else
2612 reg &= ~EECD_DI;
2613 CSR_WRITE(sc, WMREG_EECD, reg);
2614 delay(2);
2615 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2616 delay(2);
2617 CSR_WRITE(sc, WMREG_EECD, reg);
2618 delay(2);
2619 }
2620 }
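
/*
 * Worked example (illustrative): assuming UWIRE_OPC_READ is the
 * standard 3-bit MicroWire READ opcode 110, the call
 * wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3) in wm_read_eeprom_uwire()
 * below drives DI high, high, then low, pulsing SK once per bit
 * (MSB first).
 */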
2621
2622 /*
2623 * wm_eeprom_recvbits:
2624 *
2625 * Receive a series of bits from the EEPROM.
2626 */
2627 static void
2628 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2629 {
2630 uint32_t reg, val;
2631 int x;
2632
2633 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2634
2635 val = 0;
2636 for (x = nbits; x > 0; x--) {
2637 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2638 delay(2);
2639 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2640 val |= (1U << (x - 1));
2641 CSR_WRITE(sc, WMREG_EECD, reg);
2642 delay(2);
2643 }
2644 *valp = val;
2645 }
2646
2647 /*
2648 * wm_read_eeprom_uwire:
2649 *
2650 * Read a word from the EEPROM using the MicroWire protocol.
2651 */
2652 static int
2653 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2654 {
2655 uint32_t reg, val;
2656 int i;
2657
2658 for (i = 0; i < wordcnt; i++) {
2659 /* Clear SK and DI. */
2660 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2661 CSR_WRITE(sc, WMREG_EECD, reg);
2662
2663 /* Set CHIP SELECT. */
2664 reg |= EECD_CS;
2665 CSR_WRITE(sc, WMREG_EECD, reg);
2666 delay(2);
2667
2668 /* Shift in the READ command. */
2669 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2670
2671 /* Shift in address. */
2672 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2673
2674 /* Shift out the data. */
2675 wm_eeprom_recvbits(sc, &val, 16);
2676 data[i] = val & 0xffff;
2677
2678 /* Clear CHIP SELECT. */
2679 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2680 CSR_WRITE(sc, WMREG_EECD, reg);
2681 delay(2);
2682 }
2683
2684 return (0);
2685 }
2686
2687 /*
2688 * wm_spi_eeprom_ready:
2689 *
2690 * Wait for a SPI EEPROM to be ready for commands.
2691 */
2692 static int
2693 wm_spi_eeprom_ready(struct wm_softc *sc)
2694 {
2695 uint32_t val;
2696 int usec;
2697
2698 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2699 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2700 wm_eeprom_recvbits(sc, &val, 8);
2701 if ((val & SPI_SR_RDY) == 0)
2702 break;
2703 }
2704 if (usec >= SPI_MAX_RETRIES) {
2705 aprint_error("%s: EEPROM failed to become ready\n",
2706 sc->sc_dev.dv_xname);
2707 return (1);
2708 }
2709 return (0);
2710 }
2711
2712 /*
2713 * wm_read_eeprom_spi:
2714 *
2715 * Read a word from the EEPROM using the SPI protocol.
2716 */
2717 static int
2718 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2719 {
2720 uint32_t reg, val;
2721 int i;
2722 uint8_t opc;
2723
2724 /* Clear SK and CS. */
2725 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2726 CSR_WRITE(sc, WMREG_EECD, reg);
2727 delay(2);
2728
2729 if (wm_spi_eeprom_ready(sc))
2730 return (1);
2731
2732 /* Toggle CS to flush commands. */
2733 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2734 delay(2);
2735 CSR_WRITE(sc, WMREG_EECD, reg);
2736 delay(2);
2737
2738 opc = SPI_OPC_READ;
2739 if (sc->sc_ee_addrbits == 8 && word >= 128)
2740 opc |= SPI_OPC_A8;
2741
2742 wm_eeprom_sendbits(sc, opc, 8);
2743 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2744
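	/*
	 * SPI parts shift data out MSB first; swap each received
	 * 16-bit value into the byte order the rest of the driver
	 * expects.
	 */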
2745 for (i = 0; i < wordcnt; i++) {
2746 wm_eeprom_recvbits(sc, &val, 16);
2747 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2748 }
2749
2750 /* Raise CS and clear SK. */
2751 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2752 CSR_WRITE(sc, WMREG_EECD, reg);
2753 delay(2);
2754
2755 return (0);
2756 }
2757
2758 /*
2759 * wm_read_eeprom:
2760 *
2761 * Read data from the serial EEPROM.
2762 */
2763 static int
2764 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2765 {
2766 int rv;
2767
2768 if (wm_acquire_eeprom(sc))
2769 return (1);
2770
2771 if (sc->sc_flags & WM_F_EEPROM_SPI)
2772 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2773 else
2774 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2775
2776 wm_release_eeprom(sc);
2777 return (rv);
2778 }
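
/*
 * Usage sketch (illustrative, not driver code): pulling the three
 * station-address words the way an attach routine would, assuming
 * the EEPROM_OFF_MACADDR offset from if_wmreg.h.
 */
#if 0 /* example only */
{
	uint16_t myea[3];

	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea))
		printf("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
}
#endif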
2779
2780 /*
2781 * wm_add_rxbuf:
2782 *
2783 * Add a receive buffer to the indicated descriptor.
2784 */
2785 static int
2786 wm_add_rxbuf(struct wm_softc *sc, int idx)
2787 {
2788 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2789 struct mbuf *m;
2790 int error;
2791
2792 MGETHDR(m, M_DONTWAIT, MT_DATA);
2793 if (m == NULL)
2794 return (ENOBUFS);
2795
2796 MCLGET(m, M_DONTWAIT);
2797 if ((m->m_flags & M_EXT) == 0) {
2798 m_freem(m);
2799 return (ENOBUFS);
2800 }
2801
2802 if (rxs->rxs_mbuf != NULL)
2803 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2804
2805 rxs->rxs_mbuf = m;
2806
2807 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2808 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2809 BUS_DMA_READ|BUS_DMA_NOWAIT);
2810 if (error) {
2811 printf("%s: unable to load rx DMA map %d, error = %d\n",
2812 sc->sc_dev.dv_xname, idx, error);
2813 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2814 }
2815
2816 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2817 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2818
2819 WM_INIT_RXDESC(sc, idx);
2820
2821 return (0);
2822 }
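
/*
 * Note: the cluster loaded above is MCLBYTES long, which must agree
 * with the receive buffer size programmed into RCTL by wm_init()
 * (RCTL_2k for the only currently supported MCLBYTES of 2048).
 */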
2823
2824 /*
2825 * wm_set_ral:
2826 *
2827 * Set an entry in the receive address list.
2828 */
2829 static void
2830 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2831 {
2832 uint32_t ral_lo, ral_hi;
2833
2834 if (enaddr != NULL) {
2835 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2836 (enaddr[3] << 24);
2837 ral_hi = enaddr[4] | (enaddr[5] << 8);
2838 ral_hi |= RAL_AV;
2839 } else {
2840 ral_lo = 0;
2841 ral_hi = 0;
2842 }
2843
2844 if (sc->sc_type >= WM_T_82544) {
2845 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2846 ral_lo);
2847 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2848 ral_hi);
2849 } else {
2850 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2851 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2852 }
2853 }
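
/*
 * Worked example (illustrative): for the station address
 * 00:01:02:03:04:05, the code above packs ral_lo = 0x03020100 and
 * ral_hi = 0x0504 | RAL_AV, i.e. the address bytes land in ascending
 * byte order with the Address Valid bit set in the high register.
 */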
2854
2855 /*
2856 * wm_mchash:
2857 *
2858 * Compute the hash of the multicast address for the 4096-bit
2859 * multicast filter.
2860 */
2861 static uint32_t
2862 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2863 {
2864 static const int lo_shift[4] = { 4, 3, 2, 0 };
2865 static const int hi_shift[4] = { 4, 5, 6, 8 };
2866 uint32_t hash;
2867
2868 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2869 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2870
2871 return (hash & 0xfff);
2872 }
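
/*
 * Worked example (illustrative): with the default mchash type 0
 * (lo_shift 4, hi_shift 4), the IPv4 all-hosts group
 * 01:00:5e:00:00:01 has enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * and wm_set_filter() below sets bit 16 (0x010 & 0x1f) of MTA
 * register 0 (0x010 >> 5).
 */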
2873
2874 /*
2875 * wm_set_filter:
2876 *
2877 * Set up the receive filter.
2878 */
2879 static void
2880 wm_set_filter(struct wm_softc *sc)
2881 {
2882 struct ethercom *ec = &sc->sc_ethercom;
2883 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2884 struct ether_multi *enm;
2885 struct ether_multistep step;
2886 bus_addr_t mta_reg;
2887 uint32_t hash, reg, bit;
2888 int i;
2889
2890 if (sc->sc_type >= WM_T_82544)
2891 mta_reg = WMREG_CORDOVA_MTA;
2892 else
2893 mta_reg = WMREG_MTA;
2894
2895 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2896
2897 if (ifp->if_flags & IFF_BROADCAST)
2898 sc->sc_rctl |= RCTL_BAM;
2899 if (ifp->if_flags & IFF_PROMISC) {
2900 sc->sc_rctl |= RCTL_UPE;
2901 goto allmulti;
2902 }
2903
2904 /*
2905 * Set the station address in the first RAL slot, and
2906 * clear the remaining slots.
2907 */
2908 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2909 for (i = 1; i < WM_RAL_TABSIZE; i++)
2910 wm_set_ral(sc, NULL, i);
2911
2912 /* Clear out the multicast table. */
2913 for (i = 0; i < WM_MC_TABSIZE; i++)
2914 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2915
2916 ETHER_FIRST_MULTI(step, ec, enm);
2917 while (enm != NULL) {
2918 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2919 /*
2920 * We must listen to a range of multicast addresses.
2921 * For now, just accept all multicasts, rather than
2922 * trying to set only those filter bits needed to match
2923 * the range. (At this time, the only use of address
2924 * ranges is for IP multicast routing, for which the
2925 * range is big enough to require all bits set.)
2926 */
2927 goto allmulti;
2928 }
2929
2930 hash = wm_mchash(sc, enm->enm_addrlo);
2931
2932 reg = (hash >> 5) & 0x7f;
2933 bit = hash & 0x1f;
2934
2935 hash = CSR_READ(sc, mta_reg + (reg << 2));
2936 hash |= 1U << bit;
2937
2938 /* XXX Hardware bug?? */
2939 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2940 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2941 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2942 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2943 } else
2944 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2945
2946 ETHER_NEXT_MULTI(step, enm);
2947 }
2948
2949 ifp->if_flags &= ~IFF_ALLMULTI;
2950 goto setit;
2951
2952 allmulti:
2953 ifp->if_flags |= IFF_ALLMULTI;
2954 sc->sc_rctl |= RCTL_MPE;
2955
2956 setit:
2957 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2958 }
2959
2960 /*
2961 * wm_tbi_mediainit:
2962 *
2963 * Initialize media for use on 1000BASE-X devices.
2964 */
2965 static void
2966 wm_tbi_mediainit(struct wm_softc *sc)
2967 {
2968 const char *sep = "";
2969
2970 if (sc->sc_type < WM_T_82543)
2971 sc->sc_tipg = TIPG_WM_DFLT;
2972 else
2973 sc->sc_tipg = TIPG_LG_DFLT;
2974
2975 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2976 wm_tbi_mediastatus);
2977
2978 /*
2979 * SWD Pins:
2980 *
2981 * 0 = Link LED (output)
2982 * 1 = Loss Of Signal (input)
2983 */
2984 sc->sc_ctrl |= CTRL_SWDPIO(0);
2985 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2986
2987 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2988
2989 #define ADD(ss, mm, dd) \
2990 do { \
2991 printf("%s%s", sep, ss); \
2992 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2993 sep = ", "; \
2994 } while (/*CONSTCOND*/0)
2995
2996 printf("%s: ", sc->sc_dev.dv_xname);
2997 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2998 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2999 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3000 printf("\n");
3001
3002 #undef ADD
3003
3004 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3005 }
3006
3007 /*
3008 * wm_tbi_mediastatus: [ifmedia interface function]
3009 *
3010 * Get the current interface media status on a 1000BASE-X device.
3011 */
3012 static void
3013 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3014 {
3015 struct wm_softc *sc = ifp->if_softc;
3016
3017 ifmr->ifm_status = IFM_AVALID;
3018 ifmr->ifm_active = IFM_ETHER;
3019
3020 if (sc->sc_tbi_linkup == 0) {
3021 ifmr->ifm_active |= IFM_NONE;
3022 return;
3023 }
3024
3025 ifmr->ifm_status |= IFM_ACTIVE;
3026 ifmr->ifm_active |= IFM_1000_SX;
3027 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3028 ifmr->ifm_active |= IFM_FDX;
3029 }
3030
3031 /*
3032 * wm_tbi_mediachange: [ifmedia interface function]
3033 *
3034 * Set hardware to newly-selected media on a 1000BASE-X device.
3035 */
3036 static int
3037 wm_tbi_mediachange(struct ifnet *ifp)
3038 {
3039 struct wm_softc *sc = ifp->if_softc;
3040 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3041 uint32_t status;
3042 int i;
3043
3044 sc->sc_txcw = ife->ifm_data;
3045 if (sc->sc_ctrl & CTRL_RFCE)
3046 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
3047 if (sc->sc_ctrl & CTRL_TFCE)
3048 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
3049 sc->sc_txcw |= TXCW_ANE;
3050
3051 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3052 delay(10000);
3053
3054 sc->sc_tbi_anstate = 0;
3055
3056 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3057 /* Have signal; wait for the link to come up. */
3058 for (i = 0; i < 50; i++) {
3059 delay(10000);
3060 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3061 break;
3062 }
3063
3064 status = CSR_READ(sc, WMREG_STATUS);
3065 if (status & STATUS_LU) {
3066 /* Link is up. */
3067 DPRINTF(WM_DEBUG_LINK,
3068 ("%s: LINK: set media -> link up %s\n",
3069 sc->sc_dev.dv_xname,
3070 (status & STATUS_FD) ? "FDX" : "HDX"));
3071 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3072 if (status & STATUS_FD)
3073 sc->sc_tctl |=
3074 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3075 else
3076 sc->sc_tctl |=
3077 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3078 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3079 sc->sc_tbi_linkup = 1;
3080 } else {
3081 /* Link is down. */
3082 DPRINTF(WM_DEBUG_LINK,
3083 ("%s: LINK: set media -> link down\n",
3084 sc->sc_dev.dv_xname));
3085 sc->sc_tbi_linkup = 0;
3086 }
3087 } else {
3088 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3089 sc->sc_dev.dv_xname));
3090 sc->sc_tbi_linkup = 0;
3091 }
3092
3093 wm_tbi_set_linkled(sc);
3094
3095 return (0);
3096 }
3097
3098 /*
3099 * wm_tbi_set_linkled:
3100 *
3101 * Update the link LED on 1000BASE-X devices.
3102 */
3103 static void
3104 wm_tbi_set_linkled(struct wm_softc *sc)
3105 {
3106
3107 if (sc->sc_tbi_linkup)
3108 sc->sc_ctrl |= CTRL_SWDPIN(0);
3109 else
3110 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3111
3112 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3113 }
3114
3115 /*
3116 * wm_tbi_check_link:
3117 *
3118 * Check the link on 1000BASE-X devices.
3119 */
3120 static void
3121 wm_tbi_check_link(struct wm_softc *sc)
3122 {
3123 uint32_t rxcw, ctrl, status;
3124
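	/*
	 * sc_tbi_anstate is a countdown armed (set to 2) by
	 * wm_linkintr() above: 0 means nothing is pending, values
	 * above 1 are decremented once per tick, and the link is
	 * actually (re)evaluated on the tick after that.
	 */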
3125 if (sc->sc_tbi_anstate == 0)
3126 return;
3127 else if (sc->sc_tbi_anstate > 1) {
3128 DPRINTF(WM_DEBUG_LINK,
3129 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3130 sc->sc_tbi_anstate));
3131 sc->sc_tbi_anstate--;
3132 return;
3133 }
3134
3135 sc->sc_tbi_anstate = 0;
3136
3137 rxcw = CSR_READ(sc, WMREG_RXCW);
3138 ctrl = CSR_READ(sc, WMREG_CTRL);
3139 status = CSR_READ(sc, WMREG_STATUS);
3140
3141 if ((status & STATUS_LU) == 0) {
3142 DPRINTF(WM_DEBUG_LINK,
3143 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3144 sc->sc_tbi_linkup = 0;
3145 } else {
3146 DPRINTF(WM_DEBUG_LINK,
3147 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3148 (status & STATUS_FD) ? "FDX" : "HDX"));
3149 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3150 if (status & STATUS_FD)
3151 sc->sc_tctl |=
3152 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3153 else
3154 sc->sc_tctl |=
3155 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3156 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3157 sc->sc_tbi_linkup = 1;
3158 }
3159
3160 wm_tbi_set_linkled(sc);
3161 }
3162
3163 /*
3164 * wm_gmii_reset:
3165 *
3166 * Reset the PHY.
3167 */
3168 static void
3169 wm_gmii_reset(struct wm_softc *sc)
3170 {
3171 uint32_t reg;
3172
3173 if (sc->sc_type >= WM_T_82544) {
3174 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3175 delay(20000);
3176
3177 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3178 delay(20000);
3179 } else {
3180 /* The PHY reset pin is active-low. */
3181 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3182 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3183 CTRL_EXT_SWDPIN(4));
3184 reg |= CTRL_EXT_SWDPIO(4);
3185
3186 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3187 delay(10);
3188
3189 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3190 delay(10);
3191
3192 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3193 delay(10);
3194 #if 0
3195 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3196 #endif
3197 }
3198 }
3199
3200 /*
3201 * wm_gmii_mediainit:
3202 *
3203 * Initialize media for use on 1000BASE-T devices.
3204 */
3205 static void
3206 wm_gmii_mediainit(struct wm_softc *sc)
3207 {
3208 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3209
3210 /* We have MII. */
3211 sc->sc_flags |= WM_F_HAS_MII;
3212
3213 sc->sc_tipg = TIPG_1000T_DFLT;
3214
3215 /*
3216 * Let the chip set speed/duplex on its own based on
3217 * signals from the PHY.
3218 */
3219 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3220 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3221
3222 /* Initialize our media structures and probe the GMII. */
3223 sc->sc_mii.mii_ifp = ifp;
3224
3225 if (sc->sc_type >= WM_T_82544) {
3226 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3227 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3228 } else {
3229 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3230 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3231 }
3232 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3233
3234 wm_gmii_reset(sc);
3235
3236 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3237 wm_gmii_mediastatus);
3238
3239 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3240 MII_OFFSET_ANY, 0);
3241 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3242 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3243 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3244 } else
3245 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3246 }
3247
3248 /*
3249 * wm_gmii_mediastatus: [ifmedia interface function]
3250 *
3251 * Get the current interface media status on a 1000BASE-T device.
3252 */
3253 static void
3254 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3255 {
3256 struct wm_softc *sc = ifp->if_softc;
3257
3258 mii_pollstat(&sc->sc_mii);
3259 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3260 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3261 }
3262
3263 /*
3264 * wm_gmii_mediachange: [ifmedia interface function]
3265 *
3266 * Set hardware to newly-selected media on a 1000BASE-T device.
3267 */
3268 static int
3269 wm_gmii_mediachange(struct ifnet *ifp)
3270 {
3271 struct wm_softc *sc = ifp->if_softc;
3272
3273 if (ifp->if_flags & IFF_UP)
3274 mii_mediachg(&sc->sc_mii);
3275 return (0);
3276 }
3277
3278 #define MDI_IO CTRL_SWDPIN(2)
3279 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3280 #define MDI_CLK CTRL_SWDPIN(3)
3281
3282 static void
3283 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3284 {
3285 uint32_t i, v;
3286
3287 v = CSR_READ(sc, WMREG_CTRL);
3288 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3289 v |= MDI_DIR | CTRL_SWDPIO(3);
3290
3291 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3292 if (data & i)
3293 v |= MDI_IO;
3294 else
3295 v &= ~MDI_IO;
3296 CSR_WRITE(sc, WMREG_CTRL, v);
3297 delay(10);
3298 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3299 delay(10);
3300 CSR_WRITE(sc, WMREG_CTRL, v);
3301 delay(10);
3302 }
3303 }
3304
3305 static uint32_t
3306 i82543_mii_recvbits(struct wm_softc *sc)
3307 {
3308 uint32_t v, i, data = 0;
3309
3310 v = CSR_READ(sc, WMREG_CTRL);
3311 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3312 v |= CTRL_SWDPIO(3);
3313
3314 CSR_WRITE(sc, WMREG_CTRL, v);
3315 delay(10);
3316 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3317 delay(10);
3318 CSR_WRITE(sc, WMREG_CTRL, v);
3319 delay(10);
3320
3321 for (i = 0; i < 16; i++) {
3322 data <<= 1;
3323 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3324 delay(10);
3325 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3326 data |= 1;
3327 CSR_WRITE(sc, WMREG_CTRL, v);
3328 delay(10);
3329 }
3330
3331 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3332 delay(10);
3333 CSR_WRITE(sc, WMREG_CTRL, v);
3334 delay(10);
3335
3336 return (data);
3337 }
3338
3339 #undef MDI_IO
3340 #undef MDI_DIR
3341 #undef MDI_CLK
3342
3343 /*
3344 * wm_gmii_i82543_readreg: [mii interface function]
3345 *
3346 * Read a PHY register on the GMII (i82543 version).
3347 */
3348 static int
3349 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3350 {
3351 struct wm_softc *sc = (void *) self;
3352 int rv;
3353
3354 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3355 i82543_mii_sendbits(sc, reg | (phy << 5) |
3356 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3357 rv = i82543_mii_recvbits(sc) & 0xffff;
3358
3359 DPRINTF(WM_DEBUG_GMII,
3360 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3361 sc->sc_dev.dv_xname, phy, reg, rv));
3362
3363 return (rv);
3364 }
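
/*
 * Worked example (illustrative): reading BMSR (reg 1) from PHY 1
 * builds the 14-bit management frame
 *
 *	(MII_COMMAND_START << 12) | (MII_COMMAND_READ << 10) |
 *	    (1 << 5) | 1 == 0x1821
 *
 * i.e. start (01), read opcode (10), PHY address 00001, register
 * 00001, clocked out MSB first after the 32-bit preamble of ones,
 * using the MII_COMMAND_* constants from <dev/mii/mii.h>.
 */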
3365
3366 /*
3367 * wm_gmii_i82543_writereg: [mii interface function]
3368 *
3369 * Write a PHY register on the GMII (i82543 version).
3370 */
3371 static void
3372 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3373 {
3374 struct wm_softc *sc = (void *) self;
3375
3376 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3377 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3378 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3379 (MII_COMMAND_START << 30), 32);
3380 }
3381
3382 /*
3383 * wm_gmii_i82544_readreg: [mii interface function]
3384 *
3385 * Read a PHY register on the GMII.
3386 */
3387 static int
3388 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3389 {
3390 struct wm_softc *sc = (void *) self;
3391 uint32_t mdic = 0;
3392 int i, rv;
3393
3394 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3395 MDIC_REGADD(reg));
3396
3397 for (i = 0; i < 100; i++) {
3398 mdic = CSR_READ(sc, WMREG_MDIC);
3399 if (mdic & MDIC_READY)
3400 break;
3401 delay(10);
3402 }
3403
3404 if ((mdic & MDIC_READY) == 0) {
3405 printf("%s: MDIC read timed out: phy %d reg %d\n",
3406 sc->sc_dev.dv_xname, phy, reg);
3407 rv = 0;
3408 } else if (mdic & MDIC_E) {
3409 #if 0 /* This is normal if no PHY is present. */
3410 printf("%s: MDIC read error: phy %d reg %d\n",
3411 sc->sc_dev.dv_xname, phy, reg);
3412 #endif
3413 rv = 0;
3414 } else {
3415 rv = MDIC_DATA(mdic);
3416 if (rv == 0xffff)
3417 rv = 0;
3418 }
3419
3420 return (rv);
3421 }
3422
3423 /*
3424 * wm_gmii_i82544_writereg: [mii interface function]
3425 *
3426 * Write a PHY register on the GMII.
3427 */
3428 static void
3429 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3430 {
3431 struct wm_softc *sc = (void *) self;
3432 uint32_t mdic = 0;
3433 int i;
3434
3435 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3436 MDIC_REGADD(reg) | MDIC_DATA(val));
3437
3438 for (i = 0; i < 100; i++) {
3439 mdic = CSR_READ(sc, WMREG_MDIC);
3440 if (mdic & MDIC_READY)
3441 break;
3442 delay(10);
3443 }
3444
3445 if ((mdic & MDIC_READY) == 0)
3446 printf("%s: MDIC write timed out: phy %d reg %d\n",
3447 sc->sc_dev.dv_xname, phy, reg);
3448 else if (mdic & MDIC_E)
3449 printf("%s: MDIC write error: phy %d reg %d\n",
3450 sc->sc_dev.dv_xname, phy, reg);
3451 }
3452
3453 /*
3454 * wm_gmii_statchg: [mii interface function]
3455 *
3456 * Callback from MII layer when media changes.
3457 */
3458 static void
3459 wm_gmii_statchg(struct device *self)
3460 {
3461 struct wm_softc *sc = (void *) self;
3462
3463 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3464
3465 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3466 DPRINTF(WM_DEBUG_LINK,
3467 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3468 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3469 } else {
3470 DPRINTF(WM_DEBUG_LINK,
3471 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3472 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3473 }
3474
3475 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3476 }
3477