/*	$NetBSD: if_wm.c,v 1.62 2003/11/07 06:03:52 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out performance stability issue on i82547 (fvdl).
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.62 2003/11/07 06:03:52 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
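/*
 * A quick example of the power-of-two wrap trick used above: with
 * WM_NTXDESC == 256, WM_NEXTTX(255) == (256 & 0xff) == 0, so the
 * index wraps back to the start of the ring with a single AND and
 * no division or comparison.
 */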

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
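/*
 * Worked example for the sizing comment above: a maximum-length jumbo
 * frame (ETHER_MAX_LEN_JUMBO, 9018 bytes) needs
 * howmany(9018, MCLBYTES) == 5 of these 2k buffers, which is where
 * the "5 Rx buffers" figure comes from.
 */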

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
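/*
 * The tail pointer scheme above keeps sc_rxtailp aimed at the m_next
 * field of the last mbuf in the chain (or at sc_rxhead when the chain
 * is empty), so linking another buffer onto a partially received
 * packet is O(1) with no list traversal.
 */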

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC - __x), (ops)); \
		__n -= (WM_NTXDESC - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
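/*
 * Example of the two-part sync above: syncing 10 descriptors starting
 * at index 250 (with WM_NTXDESC == 256) issues one bus_dmamap_sync()
 * for descriptors 250..255 and a second for descriptors 0..3, since
 * the range wraps but the backing memory is contiguous.
 */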

#define	WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't "scoot" packets longer than the standard \
	 * Ethernet MTU.  On strict-alignment platforms, if the total \
	 * size exceeds (2K - 2) we set align_tweak to 0 and let \
	 * the upper layer copy the headers. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
 \
	__rxd->wrx_addr.wa_low = \
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + \
		(sc)->sc_align_tweak); \
	__rxd->wrx_addr.wa_high = 0; \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
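/*
 * Note that the CSR_WRITE() of the descriptor index to RDT is what
 * actually hands the (re)initialized descriptor back to the chip;
 * everything before it only prepares host memory.
 */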

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
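/*
 * These helpers use the chip's indirect I/O window: the register
 * offset is written to the first dword of the I/O BAR and the data is
 * then read or written through the second dword.  This is the access
 * method the bug workarounds mentioned below rely on.
 */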

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
			    0, &sc->sc_iot, &sc->sc_ioh,
			    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
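	/*
	 * Byte order example: if the EEPROM words read back as
	 * myea[] = { 0x1100, 0x3322, 0x5544 }, the address formed above
	 * is 00:11:22:33:44:55 -- each 16-bit word holds two address
	 * bytes, low byte first.
	 */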

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}
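	/*
	 * Example for a plain IPv4 packet: offset == ETHER_HDR_LEN (14)
	 * and iphl == 20, so the context above has IPCSS == 14,
	 * IPCSO == 14 + 10 == 24 (the offset of ip_sum), and
	 * IPCSE == 14 + 20 - 1 == 33, the last byte covered by the
	 * header checksum.
	 */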

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}
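	/*
	 * Either way, the cached values now match what the chip has
	 * loaded, so back-to-back packets with the same checksum layout
	 * (the common case for a bulk TCP stream) skip the context
	 * descriptor entirely.
	 */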

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;
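		/*
		 * Reading ICR acknowledges (clears) the asserted
		 * interrupt causes as a side effect, which is why the
		 * loop can simply re-read it until no interesting bits
		 * remain.
		 */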

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
1739 sc->sc_dev.dv_xname));
1740 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1741 }
1742 #endif
1743 wm_txintr(sc);
1744
1745 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1746 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1747 wm_linkintr(sc, icr);
1748 }
1749
1750 if (icr & ICR_RXO) {
1751 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1752 wantinit = 1;
1753 }
1754 }
1755
1756 if (handled) {
1757 if (wantinit)
1758 wm_init(ifp);
1759
1760 /* Try to get more packets going. */
1761 wm_start(ifp);
1762 }
1763
1764 return (handled);
1765 }
1766
1767 /*
1768 * wm_txintr:
1769 *
1770 * Helper; handle transmit interrupts.
1771 */
1772 static void
1773 wm_txintr(struct wm_softc *sc)
1774 {
1775 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1776 struct wm_txsoft *txs;
1777 uint8_t status;
1778 int i;
1779
1780 ifp->if_flags &= ~IFF_OACTIVE;
1781
1782 /*
1783 * Go through the Tx list and free mbufs for those
1784 * frames which have been transmitted.
1785 */
1786 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1787 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1788 txs = &sc->sc_txsoft[i];
1789
1790 DPRINTF(WM_DEBUG_TX,
1791 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1792
1793 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1794 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1795
1796 status = le32toh(sc->sc_txdescs[
1797 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1798 if ((status & WTX_ST_DD) == 0) {
1799 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1800 BUS_DMASYNC_PREREAD);
1801 break;
1802 }
1803
1804 DPRINTF(WM_DEBUG_TX,
1805 ("%s: TX: job %d done: descs %d..%d\n",
1806 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1807 txs->txs_lastdesc));
1808
1809 /*
1810 * XXX We should probably be using the statistics
1811 * XXX registers, but I don't know if they exist
1812 * XXX on chips before the i82544.
1813 */
1814
1815 #ifdef WM_EVENT_COUNTERS
1816 if (status & WTX_ST_TU)
1817 WM_EVCNT_INCR(&sc->sc_ev_tu);
1818 #endif /* WM_EVENT_COUNTERS */
1819
1820 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1821 ifp->if_oerrors++;
1822 if (status & WTX_ST_LC)
1823 printf("%s: late collision\n",
1824 sc->sc_dev.dv_xname);
1825 else if (status & WTX_ST_EC) {
1826 ifp->if_collisions += 16;
1827 printf("%s: excessive collisions\n",
1828 sc->sc_dev.dv_xname);
1829 }
1830 } else
1831 ifp->if_opackets++;
1832
1833 sc->sc_txfree += txs->txs_ndesc;
1834 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1835 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1836 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1837 m_freem(txs->txs_mbuf);
1838 txs->txs_mbuf = NULL;
1839 }
1840
1841 /* Update the dirty transmit buffer pointer. */
1842 sc->sc_txsdirty = i;
1843 DPRINTF(WM_DEBUG_TX,
1844 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1845
1846 /*
1847 * If there are no more pending transmissions, cancel the watchdog
1848 * timer.
1849 */
1850 if (sc->sc_txsfree == WM_TXQUEUELEN)
1851 ifp->if_timer = 0;
1852 }
1853
1854 /*
1855 * wm_rxintr:
1856 *
1857 * Helper; handle receive interrupts.
1858 */
1859 static void
1860 wm_rxintr(struct wm_softc *sc)
1861 {
1862 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1863 struct wm_rxsoft *rxs;
1864 struct mbuf *m;
1865 int i, len;
1866 uint8_t status, errors;
1867
1868 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1869 rxs = &sc->sc_rxsoft[i];
1870
1871 DPRINTF(WM_DEBUG_RX,
1872 ("%s: RX: checking descriptor %d\n",
1873 sc->sc_dev.dv_xname, i));
1874
1875 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1876
1877 status = sc->sc_rxdescs[i].wrx_status;
1878 errors = sc->sc_rxdescs[i].wrx_errors;
1879 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1880
1881 if ((status & WRX_ST_DD) == 0) {
1882 /*
1883 * We have processed all of the receive descriptors.
1884 */
1885 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1886 break;
1887 }
1888
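		/*
		 * sc_rxdiscard is set when a buffer shortage forced us to
		 * drop a partially-received frame; keep throwing away
		 * descriptors until we see EOP (end of packet).
		 */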
1889 if (__predict_false(sc->sc_rxdiscard)) {
1890 DPRINTF(WM_DEBUG_RX,
1891 ("%s: RX: discarding contents of descriptor %d\n",
1892 sc->sc_dev.dv_xname, i));
1893 WM_INIT_RXDESC(sc, i);
1894 if (status & WRX_ST_EOP) {
1895 /* Reset our state. */
1896 DPRINTF(WM_DEBUG_RX,
1897 ("%s: RX: resetting rxdiscard -> 0\n",
1898 sc->sc_dev.dv_xname));
1899 sc->sc_rxdiscard = 0;
1900 }
1901 continue;
1902 }
1903
1904 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1905 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1906
1907 m = rxs->rxs_mbuf;
1908
1909 /*
1910 * Add a new receive buffer to the ring.
1911 */
1912 if (wm_add_rxbuf(sc, i) != 0) {
1913 /*
1914 * Failed, throw away what we've done so
1915 * far, and discard the rest of the packet.
1916 */
1917 ifp->if_ierrors++;
1918 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1919 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1920 WM_INIT_RXDESC(sc, i);
1921 if ((status & WRX_ST_EOP) == 0)
1922 sc->sc_rxdiscard = 1;
1923 if (sc->sc_rxhead != NULL)
1924 m_freem(sc->sc_rxhead);
1925 WM_RXCHAIN_RESET(sc);
1926 DPRINTF(WM_DEBUG_RX,
1927 ("%s: RX: Rx buffer allocation failed, "
1928 "dropping packet%s\n", sc->sc_dev.dv_xname,
1929 sc->sc_rxdiscard ? " (discard)" : ""));
1930 continue;
1931 }
1932
1933 WM_RXCHAIN_LINK(sc, m);
1934
1935 m->m_len = len;
1936
1937 DPRINTF(WM_DEBUG_RX,
1938 ("%s: RX: buffer at %p len %d\n",
1939 sc->sc_dev.dv_xname, m->m_data, len));
1940
1941 /*
1942 * If this is not the end of the packet, keep
1943 * looking.
1944 */
1945 if ((status & WRX_ST_EOP) == 0) {
1946 sc->sc_rxlen += len;
1947 DPRINTF(WM_DEBUG_RX,
1948 ("%s: RX: not yet EOP, rxlen -> %d\n",
1949 sc->sc_dev.dv_xname, sc->sc_rxlen));
1950 continue;
1951 }
1952
1953 /*
1954 * Okay, we have the entire packet now...
1955 */
1956 *sc->sc_rxtailp = NULL;
1957 m = sc->sc_rxhead;
1958 len += sc->sc_rxlen;
1959
1960 WM_RXCHAIN_RESET(sc);
1961
1962 DPRINTF(WM_DEBUG_RX,
1963 ("%s: RX: have entire packet, len -> %d\n",
1964 sc->sc_dev.dv_xname, len));
1965
1966 /*
1967 * If an error occurred, update stats and drop the packet.
1968 */
1969 if (errors &
1970 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1971 ifp->if_ierrors++;
1972 if (errors & WRX_ER_SE)
1973 printf("%s: symbol error\n",
1974 sc->sc_dev.dv_xname);
1975 else if (errors & WRX_ER_SEQ)
1976 printf("%s: receive sequence error\n",
1977 sc->sc_dev.dv_xname);
1978 else if (errors & WRX_ER_CE)
1979 printf("%s: CRC error\n",
1980 sc->sc_dev.dv_xname);
1981 m_freem(m);
1982 continue;
1983 }
1984
1985 /*
1986 * No errors. Receive the packet.
1987 *
1988 * Note, we have configured the chip to include the
1989 * CRC with every packet.
1990 */
1991 m->m_flags |= M_HASFCS;
1992 m->m_pkthdr.rcvif = ifp;
1993 m->m_pkthdr.len = len;
1994
1995 #if 0 /* XXXJRT */
1996 /*
1997 * If VLANs are enabled, VLAN packets have been unwrapped
1998 * for us. Associate the tag with the packet.
1999 */
2000 if (sc->sc_ethercom.ec_nvlans != 0 &&
2001 (status & WRX_ST_VP) != 0) {
2002 struct m_tag *vtag;
2003
2004 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2005 M_NOWAIT);
2006 if (vtag == NULL) {
2007 ifp->if_ierrors++;
2008 printf("%s: unable to allocate VLAN tag\n",
2009 sc->sc_dev.dv_xname);
2010 m_freem(m);
2011 continue;
2012 }
2013
2014 *(u_int *)(vtag + 1) =
2015 le16toh(sc->sc_rxdescs[i].wrx_special);
2016 }
2017 #endif /* XXXJRT */
2018
2019 /*
2020 * Set up checksum info for this packet.
2021 */
2022 if (status & WRX_ST_IPCS) {
2023 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2024 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2025 if (errors & WRX_ER_IPE)
2026 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2027 }
2028 if (status & WRX_ST_TCPCS) {
2029 /*
2030 * Note: we don't know if this was TCP or UDP,
2031 * so we just set both bits, and expect the
2032 * upper layers to deal.
2033 */
2034 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2035 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2036 if (errors & WRX_ER_TCPE)
2037 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2038 }
2039
2040 ifp->if_ipackets++;
2041
2042 #if NBPFILTER > 0
2043 /* Pass this up to any BPF listeners. */
2044 if (ifp->if_bpf)
2045 bpf_mtap(ifp->if_bpf, m);
2046 #endif /* NBPFILTER > 0 */
2047
2048 /* Pass it on. */
2049 (*ifp->if_input)(ifp, m);
2050 }
2051
2052 /* Update the receive pointer. */
2053 sc->sc_rxptr = i;
2054
2055 DPRINTF(WM_DEBUG_RX,
2056 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2057 }
2058
2059 /*
2060 * wm_linkintr:
2061 *
2062 * Helper; handle link interrupts.
2063 */
2064 static void
2065 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2066 {
2067 uint32_t status;
2068
2069 /*
2070 * If we get a link status interrupt on a 1000BASE-T
2071 * device, just fall into the normal MII tick path.
2072 */
2073 if (sc->sc_flags & WM_F_HAS_MII) {
2074 if (icr & ICR_LSC) {
2075 DPRINTF(WM_DEBUG_LINK,
2076 ("%s: LINK: LSC -> mii_tick\n",
2077 sc->sc_dev.dv_xname));
2078 mii_tick(&sc->sc_mii);
2079 } else if (icr & ICR_RXSEQ) {
2080 DPRINTF(WM_DEBUG_LINK,
2081 ("%s: LINK Receive sequence error\n",
2082 sc->sc_dev.dv_xname));
2083 }
2084 return;
2085 }
2086
2087 /*
2088 * If we are now receiving /C/, check for link again in
2089 * a couple of link clock ticks.
2090 */
2091 if (icr & ICR_RXCFG) {
2092 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2093 sc->sc_dev.dv_xname));
2094 sc->sc_tbi_anstate = 2;
2095 }
2096
2097 if (icr & ICR_LSC) {
2098 status = CSR_READ(sc, WMREG_STATUS);
2099 if (status & STATUS_LU) {
2100 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2101 sc->sc_dev.dv_xname,
2102 (status & STATUS_FD) ? "FDX" : "HDX"));
2103 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2104 if (status & STATUS_FD)
2105 sc->sc_tctl |=
2106 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2107 else
2108 sc->sc_tctl |=
2109 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2110 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2111 sc->sc_tbi_linkup = 1;
2112 } else {
2113 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2114 sc->sc_dev.dv_xname));
2115 sc->sc_tbi_linkup = 0;
2116 }
2117 sc->sc_tbi_anstate = 2;
2118 wm_tbi_set_linkled(sc);
2119 } else if (icr & ICR_RXSEQ) {
2120 DPRINTF(WM_DEBUG_LINK,
2121 ("%s: LINK: Receive sequence error\n",
2122 sc->sc_dev.dv_xname));
2123 }
2124 }
2125
2126 /*
2127 * wm_tick:
2128 *
 *	One second timer, used to check the link status.
2131 */
2132 static void
2133 wm_tick(void *arg)
2134 {
2135 struct wm_softc *sc = arg;
2136 int s;
2137
2138 s = splnet();
2139
2140 if (sc->sc_flags & WM_F_HAS_MII)
2141 mii_tick(&sc->sc_mii);
2142 else
2143 wm_tbi_check_link(sc);
2144
2145 splx(s);
2146
2147 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2148 }
2149
2150 /*
2151 * wm_reset:
2152 *
 *	Reset the chip to a known state.
2154 */
2155 static void
2156 wm_reset(struct wm_softc *sc)
2157 {
2158 int i;
2159
2160 switch (sc->sc_type) {
2161 case WM_T_82544:
2162 case WM_T_82540:
2163 case WM_T_82545:
2164 case WM_T_82546:
2165 case WM_T_82541:
2166 case WM_T_82541_2:
2167 /*
2168 * These chips have a problem with the memory-mapped
2169 * write cycle when issuing the reset, so use I/O-mapped
2170 * access, if possible.
2171 */
2172 if (sc->sc_flags & WM_F_IOH_VALID)
2173 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2174 else
2175 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2176 break;
2177
2178 case WM_T_82545_3:
2179 case WM_T_82546_3:
2180 /* Use the shadow control register on these chips. */
2181 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2182 break;
2183
2184 default:
2185 /* Everything else can safely use the documented method. */
2186 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2187 break;
2188 }
2189 delay(10000);
2190
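	/* The reset bit self-clears; poll for up to 1000 * 20us = 20ms. */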
2191 for (i = 0; i < 1000; i++) {
2192 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2193 return;
2194 delay(20);
2195 }
2196
2197 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2198 printf("%s: WARNING: reset failed to complete\n",
2199 sc->sc_dev.dv_xname);
2200 }
2201
2202 /*
2203 * wm_init: [ifnet interface function]
2204 *
2205 * Initialize the interface. Must be called at splnet().
2206 */
2207 static int
2208 wm_init(struct ifnet *ifp)
2209 {
2210 struct wm_softc *sc = ifp->if_softc;
2211 struct wm_rxsoft *rxs;
2212 int i, error = 0;
2213 uint32_t reg;
2214
2215 /*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * On such platforms there is a small but measurable benefit, at
	 * normal MTU, to skipping the buffer adjustment that would align
	 * the headers.  One possibility is that the DMA itself is slightly
	 * more efficient if the front of the entire packet (instead of
	 * the front of the headers) is aligned.
2222 *
2223 * Note we must always set align_tweak to 0 if we are using
2224 * jumbo frames.
2225 */
2226 #ifdef __NO_STRICT_ALIGNMENT
2227 sc->sc_align_tweak = 0;
2228 #else
2229 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2230 sc->sc_align_tweak = 0;
2231 else
2232 sc->sc_align_tweak = 2;
2233 #endif /* __NO_STRICT_ALIGNMENT */
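	/*
	 * For example, with sc_align_tweak == 2 the 14-byte Ethernet
	 * header starts 2 bytes into the buffer, which leaves the IP
	 * header 4-byte aligned for the strict-alignment case.
	 */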
2234
2235 /* Cancel any pending I/O. */
2236 wm_stop(ifp, 0);
2237
2238 /* Reset the chip to a known state. */
2239 wm_reset(sc);
2240
2241 /* Initialize the transmit descriptor ring. */
2242 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2243 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2244 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2245 sc->sc_txfree = WM_NTXDESC;
2246 sc->sc_txnext = 0;
2247
2248 sc->sc_txctx_ipcs = 0xffffffff;
2249 sc->sc_txctx_tucs = 0xffffffff;
2250
2251 if (sc->sc_type < WM_T_82543) {
2252 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2253 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2254 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2255 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2256 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2257 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2258 } else {
2259 CSR_WRITE(sc, WMREG_TBDAH, 0);
2260 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2261 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2262 CSR_WRITE(sc, WMREG_TDH, 0);
2263 CSR_WRITE(sc, WMREG_TDT, 0);
2264 CSR_WRITE(sc, WMREG_TIDV, 128);
2265
2266 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2267 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2268 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2269 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2270 }
2271 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2272 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2273
2274 /* Initialize the transmit job descriptors. */
2275 for (i = 0; i < WM_TXQUEUELEN; i++)
2276 sc->sc_txsoft[i].txs_mbuf = NULL;
2277 sc->sc_txsfree = WM_TXQUEUELEN;
2278 sc->sc_txsnext = 0;
2279 sc->sc_txsdirty = 0;
2280
2281 /*
2282 * Initialize the receive descriptor and receive job
2283 * descriptor rings.
2284 */
2285 if (sc->sc_type < WM_T_82543) {
2286 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2287 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2288 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2289 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2290 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2291 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2292
2293 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2294 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2295 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2296 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2297 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2298 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2299 } else {
2300 CSR_WRITE(sc, WMREG_RDBAH, 0);
2301 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2302 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2303 CSR_WRITE(sc, WMREG_RDH, 0);
2304 CSR_WRITE(sc, WMREG_RDT, 0);
2305 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2306 }
2307 for (i = 0; i < WM_NRXDESC; i++) {
2308 rxs = &sc->sc_rxsoft[i];
2309 if (rxs->rxs_mbuf == NULL) {
2310 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2311 printf("%s: unable to allocate or map rx "
2312 "buffer %d, error = %d\n",
2313 sc->sc_dev.dv_xname, i, error);
2314 /*
2315 * XXX Should attempt to run with fewer receive
2316 * XXX buffers instead of just failing.
2317 */
2318 wm_rxdrain(sc);
2319 goto out;
2320 }
2321 } else
2322 WM_INIT_RXDESC(sc, i);
2323 }
2324 sc->sc_rxptr = 0;
2325 sc->sc_rxdiscard = 0;
2326 WM_RXCHAIN_RESET(sc);
2327
2328 /*
2329 * Clear out the VLAN table -- we don't use it (yet).
2330 */
2331 CSR_WRITE(sc, WMREG_VET, 0);
2332 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2333 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2334
2335 /*
2336 * Set up flow-control parameters.
2337 *
2338 * XXX Values could probably stand some tuning.
2339 */
2340 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2341 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2342 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2343 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2344
2345 if (sc->sc_type < WM_T_82543) {
2346 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2347 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2348 } else {
2349 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2350 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2351 }
2352 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2353 }
2354
2355 #if 0 /* XXXJRT */
2356 /* Deal with VLAN enables. */
2357 if (sc->sc_ethercom.ec_nvlans != 0)
2358 sc->sc_ctrl |= CTRL_VME;
2359 else
2360 #endif /* XXXJRT */
2361 sc->sc_ctrl &= ~CTRL_VME;
2362
2363 /* Write the control registers. */
2364 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2365 #if 0
2366 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2367 #endif
2368
2369 /*
2370 * Set up checksum offload parameters.
2371 */
2372 reg = CSR_READ(sc, WMREG_RXCSUM);
2373 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2374 reg |= RXCSUM_IPOFL;
2375 else
2376 reg &= ~RXCSUM_IPOFL;
2377 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2378 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2379 else {
2380 reg &= ~RXCSUM_TUOFL;
2381 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2382 reg &= ~RXCSUM_IPOFL;
2383 }
2384 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2385
2386 /*
2387 * Set up the interrupt registers.
2388 */
2389 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2390 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2391 ICR_RXO | ICR_RXT0;
2392 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2393 sc->sc_icr |= ICR_RXCFG;
2394 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2395
2396 /* Set up the inter-packet gap. */
2397 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2398
2399 #if 0 /* XXXJRT */
2400 /* Set the VLAN ethernetype. */
2401 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2402 #endif
2403
2404 /*
2405 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
2407 * we resolve the media type.
2408 */
2409 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2410 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2411 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2412
2413 /* Set the media. */
2414 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2415
2416 /*
2417 * Set up the receive control register; we actually program
2418 * the register when we set the receive filter. Use multicast
2419 * address offset type 0.
2420 *
2421 * Only the i82544 has the ability to strip the incoming
2422 * CRC, so we don't enable that feature.
2423 */
2424 sc->sc_mchash_type = 0;
2425 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2426 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2427
	if (MCLBYTES == 2048) {
		sc->sc_rctl |= RCTL_2k;
	} else {
		/*
		 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many
		 * XXX DMA segments, dropping" -- why?
		 */
#if 0
		if (sc->sc_type >= WM_T_82543) {
			switch (MCLBYTES) {
2438 case 4096:
2439 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2440 break;
2441 case 8192:
2442 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2443 break;
2444 case 16384:
2445 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2446 break;
2447 default:
2448 panic("wm_init: MCLBYTES %d unsupported",
2449 MCLBYTES);
2450 break;
2451 }
2452 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2453 #else
2454 panic("wm_init: MCLBYTES > 2048 not supported.");
2455 #endif
2456 }
2457
2458 /* Set the receive filter. */
2459 wm_set_filter(sc);
2460
2461 /* Start the one second link check clock. */
2462 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2463
2464 /* ...all done! */
2465 ifp->if_flags |= IFF_RUNNING;
2466 ifp->if_flags &= ~IFF_OACTIVE;
2467
2468 out:
2469 if (error)
2470 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2471 return (error);
2472 }
2473
2474 /*
2475 * wm_rxdrain:
2476 *
2477 * Drain the receive queue.
2478 */
2479 static void
2480 wm_rxdrain(struct wm_softc *sc)
2481 {
2482 struct wm_rxsoft *rxs;
2483 int i;
2484
2485 for (i = 0; i < WM_NRXDESC; i++) {
2486 rxs = &sc->sc_rxsoft[i];
2487 if (rxs->rxs_mbuf != NULL) {
2488 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2489 m_freem(rxs->rxs_mbuf);
2490 rxs->rxs_mbuf = NULL;
2491 }
2492 }
2493 }
2494
2495 /*
2496 * wm_stop: [ifnet interface function]
2497 *
2498 * Stop transmission on the interface.
2499 */
2500 static void
2501 wm_stop(struct ifnet *ifp, int disable)
2502 {
2503 struct wm_softc *sc = ifp->if_softc;
2504 struct wm_txsoft *txs;
2505 int i;
2506
2507 /* Stop the one second clock. */
2508 callout_stop(&sc->sc_tick_ch);
2509
2510 if (sc->sc_flags & WM_F_HAS_MII) {
2511 /* Down the MII. */
2512 mii_down(&sc->sc_mii);
2513 }
2514
2515 /* Stop the transmit and receive processes. */
2516 CSR_WRITE(sc, WMREG_TCTL, 0);
2517 CSR_WRITE(sc, WMREG_RCTL, 0);
2518
2519 /* Release any queued transmit buffers. */
2520 for (i = 0; i < WM_TXQUEUELEN; i++) {
2521 txs = &sc->sc_txsoft[i];
2522 if (txs->txs_mbuf != NULL) {
2523 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2524 m_freem(txs->txs_mbuf);
2525 txs->txs_mbuf = NULL;
2526 }
2527 }
2528
2529 if (disable)
2530 wm_rxdrain(sc);
2531
2532 /* Mark the interface as down and cancel the watchdog timer. */
2533 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2534 ifp->if_timer = 0;
2535 }
2536
2537 /*
2538 * wm_acquire_eeprom:
2539 *
2540 * Perform the EEPROM handshake required on some chips.
2541 */
2542 static int
2543 wm_acquire_eeprom(struct wm_softc *sc)
2544 {
2545 uint32_t reg;
2546 int x;
2547
2548 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2549 reg = CSR_READ(sc, WMREG_EECD);
2550
2551 /* Request EEPROM access. */
2552 reg |= EECD_EE_REQ;
2553 CSR_WRITE(sc, WMREG_EECD, reg);
2554
		/* ...and wait for it to be granted. */
2556 for (x = 0; x < 100; x++) {
2557 reg = CSR_READ(sc, WMREG_EECD);
2558 if (reg & EECD_EE_GNT)
2559 break;
2560 delay(5);
2561 }
2562 if ((reg & EECD_EE_GNT) == 0) {
2563 aprint_error("%s: could not acquire EEPROM GNT\n",
2564 sc->sc_dev.dv_xname);
2565 reg &= ~EECD_EE_REQ;
2566 CSR_WRITE(sc, WMREG_EECD, reg);
2567 return (1);
2568 }
2569 }
2570
2571 return (0);
2572 }
2573
2574 /*
2575 * wm_release_eeprom:
2576 *
2577 * Release the EEPROM mutex.
2578 */
2579 static void
2580 wm_release_eeprom(struct wm_softc *sc)
2581 {
2582 uint32_t reg;
2583
2584 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2585 reg = CSR_READ(sc, WMREG_EECD);
2586 reg &= ~EECD_EE_REQ;
2587 CSR_WRITE(sc, WMREG_EECD, reg);
2588 }
2589 }
2590
2591 /*
2592 * wm_eeprom_sendbits:
2593 *
2594 * Send a series of bits to the EEPROM.
2595 */
2596 static void
2597 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2598 {
2599 uint32_t reg;
2600 int x;
2601
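	/*
	 * Bits go out MSB first: each bit is presented on DI and clocked
	 * into the part by pulsing SK high and then low again, with a
	 * 2us settle time around each edge.
	 */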
2602 reg = CSR_READ(sc, WMREG_EECD);
2603
2604 for (x = nbits; x > 0; x--) {
2605 if (bits & (1U << (x - 1)))
2606 reg |= EECD_DI;
2607 else
2608 reg &= ~EECD_DI;
2609 CSR_WRITE(sc, WMREG_EECD, reg);
2610 delay(2);
2611 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2612 delay(2);
2613 CSR_WRITE(sc, WMREG_EECD, reg);
2614 delay(2);
2615 }
2616 }
2617
2618 /*
2619 * wm_eeprom_recvbits:
2620 *
2621 * Receive a series of bits from the EEPROM.
2622 */
2623 static void
2624 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2625 {
2626 uint32_t reg, val;
2627 int x;
2628
2629 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2630
2631 val = 0;
2632 for (x = nbits; x > 0; x--) {
2633 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2634 delay(2);
2635 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2636 val |= (1U << (x - 1));
2637 CSR_WRITE(sc, WMREG_EECD, reg);
2638 delay(2);
2639 }
2640 *valp = val;
2641 }
2642
2643 /*
2644 * wm_read_eeprom_uwire:
2645 *
2646 * Read a word from the EEPROM using the MicroWire protocol.
2647 */
2648 static int
2649 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2650 {
2651 uint32_t reg, val;
2652 int i;
2653
2654 for (i = 0; i < wordcnt; i++) {
2655 /* Clear SK and DI. */
2656 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2657 CSR_WRITE(sc, WMREG_EECD, reg);
2658
2659 /* Set CHIP SELECT. */
2660 reg |= EECD_CS;
2661 CSR_WRITE(sc, WMREG_EECD, reg);
2662 delay(2);
2663
2664 /* Shift in the READ command. */
2665 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2666
2667 /* Shift in address. */
2668 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2669
2670 /* Shift out the data. */
2671 wm_eeprom_recvbits(sc, &val, 16);
2672 data[i] = val & 0xffff;
2673
2674 /* Clear CHIP SELECT. */
2675 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2676 CSR_WRITE(sc, WMREG_EECD, reg);
2677 delay(2);
2678 }
2679
2680 return (0);
2681 }
2682
2683 /*
2684 * wm_spi_eeprom_ready:
2685 *
2686 * Wait for a SPI EEPROM to be ready for commands.
2687 */
2688 static int
2689 wm_spi_eeprom_ready(struct wm_softc *sc)
2690 {
2691 uint32_t val;
2692 int usec;
2693
2694 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2695 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2696 wm_eeprom_recvbits(sc, &val, 8);
2697 if ((val & SPI_SR_RDY) == 0)
2698 break;
2699 }
2700 if (usec >= SPI_MAX_RETRIES) {
2701 aprint_error("%s: EEPROM failed to become ready\n",
2702 sc->sc_dev.dv_xname);
2703 return (1);
2704 }
2705 return (0);
2706 }
2707
2708 /*
2709 * wm_read_eeprom_spi:
2710 *
 *	Read a word from the EEPROM using the SPI protocol.
2712 */
2713 static int
2714 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2715 {
2716 uint32_t reg, val;
2717 int i;
2718 uint8_t opc;
2719
2720 /* Clear SK and CS. */
2721 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2722 CSR_WRITE(sc, WMREG_EECD, reg);
2723 delay(2);
2724
2725 if (wm_spi_eeprom_ready(sc))
2726 return (1);
2727
2728 /* Toggle CS to flush commands. */
2729 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2730 delay(2);
2731 CSR_WRITE(sc, WMREG_EECD, reg);
2732 delay(2);
2733
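	/*
	 * SPI parts are byte addressed, so the word address is shifted
	 * left by one; on parts with only 8 address bits, the ninth
	 * address bit (A8) travels in the opcode.
	 */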
2734 opc = SPI_OPC_READ;
2735 if (sc->sc_ee_addrbits == 8 && word >= 128)
2736 opc |= SPI_OPC_A8;
2737
2738 wm_eeprom_sendbits(sc, opc, 8);
2739 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2740
2741 for (i = 0; i < wordcnt; i++) {
2742 wm_eeprom_recvbits(sc, &val, 16);
2743 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2744 }
2745
2746 /* Raise CS and clear SK. */
2747 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2748 CSR_WRITE(sc, WMREG_EECD, reg);
2749 delay(2);
2750
2751 return (0);
2752 }
2753
2754 /*
2755 * wm_read_eeprom:
2756 *
2757 * Read data from the serial EEPROM.
2758 */
2759 static int
2760 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2761 {
2762 int rv;
2763
2764 if (wm_acquire_eeprom(sc))
2765 return (1);
2766
2767 if (sc->sc_flags & WM_F_EEPROM_SPI)
2768 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2769 else
2770 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2771
2772 wm_release_eeprom(sc);
2773 return (rv);
2774 }
2775
2776 /*
2777 * wm_add_rxbuf:
2778 *
 *	Add a receive buffer to the indicated descriptor.
2780 */
2781 static int
2782 wm_add_rxbuf(struct wm_softc *sc, int idx)
2783 {
2784 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2785 struct mbuf *m;
2786 int error;
2787
2788 MGETHDR(m, M_DONTWAIT, MT_DATA);
2789 if (m == NULL)
2790 return (ENOBUFS);
2791
2792 MCLGET(m, M_DONTWAIT);
2793 if ((m->m_flags & M_EXT) == 0) {
2794 m_freem(m);
2795 return (ENOBUFS);
2796 }
2797
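	/*
	 * The receive engine is programmed for 2K buffers (RCTL_2k in
	 * wm_init()), so an Rx buffer must always be a full mbuf cluster.
	 */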
2798 if (rxs->rxs_mbuf != NULL)
2799 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2800
2801 rxs->rxs_mbuf = m;
2802
2803 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2804 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2805 BUS_DMA_READ|BUS_DMA_NOWAIT);
2806 if (error) {
2807 printf("%s: unable to load rx DMA map %d, error = %d\n",
2808 sc->sc_dev.dv_xname, idx, error);
2809 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2810 }
2811
2812 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2813 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2814
2815 WM_INIT_RXDESC(sc, idx);
2816
2817 return (0);
2818 }
2819
2820 /*
2821 * wm_set_ral:
2822 *
 *	Set an entry in the receive address list.
2824 */
2825 static void
2826 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2827 {
2828 uint32_t ral_lo, ral_hi;
2829
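	/*
	 * The receive address registers hold the station address in
	 * little-endian order: bytes 0-3 in RAL_LO, bytes 4-5 plus the
	 * address-valid (AV) bit in RAL_HI.
	 */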
2830 if (enaddr != NULL) {
2831 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2832 (enaddr[3] << 24);
2833 ral_hi = enaddr[4] | (enaddr[5] << 8);
2834 ral_hi |= RAL_AV;
2835 } else {
2836 ral_lo = 0;
2837 ral_hi = 0;
2838 }
2839
2840 if (sc->sc_type >= WM_T_82544) {
2841 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2842 ral_lo);
2843 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2844 ral_hi);
2845 } else {
2846 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2847 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2848 }
2849 }
2850
2851 /*
2852 * wm_mchash:
2853 *
2854 * Compute the hash of the multicast address for the 4096-bit
2855 * multicast filter.
2856 */
2857 static uint32_t
2858 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2859 {
2860 static const int lo_shift[4] = { 4, 3, 2, 0 };
2861 static const int hi_shift[4] = { 4, 5, 6, 8 };
2862 uint32_t hash;
2863
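	/*
	 * Illustrative example: with the default filter type 0 and the
	 * multicast address 01:00:5e:00:00:01,
	 *
	 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
	 *
	 * which wm_set_filter() maps to MTA register 0, bit 16.
	 */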
2864 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2865 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2866
2867 return (hash & 0xfff);
2868 }
2869
2870 /*
2871 * wm_set_filter:
2872 *
2873 * Set up the receive filter.
2874 */
2875 static void
2876 wm_set_filter(struct wm_softc *sc)
2877 {
2878 struct ethercom *ec = &sc->sc_ethercom;
2879 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2880 struct ether_multi *enm;
2881 struct ether_multistep step;
2882 bus_addr_t mta_reg;
2883 uint32_t hash, reg, bit;
2884 int i;
2885
2886 if (sc->sc_type >= WM_T_82544)
2887 mta_reg = WMREG_CORDOVA_MTA;
2888 else
2889 mta_reg = WMREG_MTA;
2890
2891 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2892
2893 if (ifp->if_flags & IFF_BROADCAST)
2894 sc->sc_rctl |= RCTL_BAM;
2895 if (ifp->if_flags & IFF_PROMISC) {
2896 sc->sc_rctl |= RCTL_UPE;
2897 goto allmulti;
2898 }
2899
2900 /*
2901 * Set the station address in the first RAL slot, and
2902 * clear the remaining slots.
2903 */
2904 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2905 for (i = 1; i < WM_RAL_TABSIZE; i++)
2906 wm_set_ral(sc, NULL, i);
2907
2908 /* Clear out the multicast table. */
2909 for (i = 0; i < WM_MC_TABSIZE; i++)
2910 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2911
2912 ETHER_FIRST_MULTI(step, ec, enm);
2913 while (enm != NULL) {
2914 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2915 /*
2916 * We must listen to a range of multicast addresses.
2917 * For now, just accept all multicasts, rather than
2918 * trying to set only those filter bits needed to match
2919 * the range. (At this time, the only use of address
2920 * ranges is for IP multicast routing, for which the
2921 * range is big enough to require all bits set.)
2922 */
2923 goto allmulti;
2924 }
2925
2926 hash = wm_mchash(sc, enm->enm_addrlo);
2927
2928 reg = (hash >> 5) & 0x7f;
2929 bit = hash & 0x1f;
2930
2931 hash = CSR_READ(sc, mta_reg + (reg << 2));
2932 hash |= 1U << bit;
2933
		/*
		 * XXX Apparent hardware bug: on the i82544, updating an
		 * odd-numbered MTA register can clobber the even-numbered
		 * register below it, so save that neighbor and write it
		 * back after updating this one.
		 */
		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
2936 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2937 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2938 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2939 } else
2940 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2941
2942 ETHER_NEXT_MULTI(step, enm);
2943 }
2944
2945 ifp->if_flags &= ~IFF_ALLMULTI;
2946 goto setit;
2947
2948 allmulti:
2949 ifp->if_flags |= IFF_ALLMULTI;
2950 sc->sc_rctl |= RCTL_MPE;
2951
2952 setit:
2953 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2954 }
2955
2956 /*
2957 * wm_tbi_mediainit:
2958 *
2959 * Initialize media for use on 1000BASE-X devices.
2960 */
2961 static void
2962 wm_tbi_mediainit(struct wm_softc *sc)
2963 {
2964 const char *sep = "";
2965
2966 if (sc->sc_type < WM_T_82543)
2967 sc->sc_tipg = TIPG_WM_DFLT;
2968 else
2969 sc->sc_tipg = TIPG_LG_DFLT;
2970
2971 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2972 wm_tbi_mediastatus);
2973
2974 /*
2975 * SWD Pins:
2976 *
2977 * 0 = Link LED (output)
2978 * 1 = Loss Of Signal (input)
2979 */
2980 sc->sc_ctrl |= CTRL_SWDPIO(0);
2981 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2982
2983 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2984
2985 #define ADD(ss, mm, dd) \
2986 do { \
2987 printf("%s%s", sep, ss); \
2988 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2989 sep = ", "; \
2990 } while (/*CONSTCOND*/0)
2991
2992 printf("%s: ", sc->sc_dev.dv_xname);
2993 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2994 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2995 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2996 printf("\n");
2997
2998 #undef ADD
2999
3000 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3001 }
3002
3003 /*
3004 * wm_tbi_mediastatus: [ifmedia interface function]
3005 *
3006 * Get the current interface media status on a 1000BASE-X device.
3007 */
3008 static void
3009 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3010 {
3011 struct wm_softc *sc = ifp->if_softc;
3012
3013 ifmr->ifm_status = IFM_AVALID;
3014 ifmr->ifm_active = IFM_ETHER;
3015
3016 if (sc->sc_tbi_linkup == 0) {
3017 ifmr->ifm_active |= IFM_NONE;
3018 return;
3019 }
3020
3021 ifmr->ifm_status |= IFM_ACTIVE;
3022 ifmr->ifm_active |= IFM_1000_SX;
3023 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3024 ifmr->ifm_active |= IFM_FDX;
3025 }
3026
3027 /*
3028 * wm_tbi_mediachange: [ifmedia interface function]
3029 *
3030 * Set hardware to newly-selected media on a 1000BASE-X device.
3031 */
3032 static int
3033 wm_tbi_mediachange(struct ifnet *ifp)
3034 {
3035 struct wm_softc *sc = ifp->if_softc;
3036 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3037 uint32_t status;
3038 int i;
3039
3040 sc->sc_txcw = ife->ifm_data;
3041 if (sc->sc_ctrl & CTRL_RFCE)
3042 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
3043 if (sc->sc_ctrl & CTRL_TFCE)
3044 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
3045 sc->sc_txcw |= TXCW_ANE;
3046
3047 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3048 delay(10000);
3049
3050 sc->sc_tbi_anstate = 0;
3051
3052 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3053 /* Have signal; wait for the link to come up. */
3054 for (i = 0; i < 50; i++) {
3055 delay(10000);
3056 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3057 break;
3058 }
3059
3060 status = CSR_READ(sc, WMREG_STATUS);
3061 if (status & STATUS_LU) {
3062 /* Link is up. */
3063 DPRINTF(WM_DEBUG_LINK,
3064 ("%s: LINK: set media -> link up %s\n",
3065 sc->sc_dev.dv_xname,
3066 (status & STATUS_FD) ? "FDX" : "HDX"));
3067 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3068 if (status & STATUS_FD)
3069 sc->sc_tctl |=
3070 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3071 else
3072 sc->sc_tctl |=
3073 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3074 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3075 sc->sc_tbi_linkup = 1;
3076 } else {
3077 /* Link is down. */
3078 DPRINTF(WM_DEBUG_LINK,
3079 ("%s: LINK: set media -> link down\n",
3080 sc->sc_dev.dv_xname));
3081 sc->sc_tbi_linkup = 0;
3082 }
3083 } else {
3084 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3085 sc->sc_dev.dv_xname));
3086 sc->sc_tbi_linkup = 0;
3087 }
3088
3089 wm_tbi_set_linkled(sc);
3090
3091 return (0);
3092 }
3093
3094 /*
3095 * wm_tbi_set_linkled:
3096 *
3097 * Update the link LED on 1000BASE-X devices.
3098 */
3099 static void
3100 wm_tbi_set_linkled(struct wm_softc *sc)
3101 {
3102
3103 if (sc->sc_tbi_linkup)
3104 sc->sc_ctrl |= CTRL_SWDPIN(0);
3105 else
3106 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3107
3108 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3109 }
3110
3111 /*
3112 * wm_tbi_check_link:
3113 *
3114 * Check the link on 1000BASE-X devices.
3115 */
3116 static void
3117 wm_tbi_check_link(struct wm_softc *sc)
3118 {
3119 uint32_t rxcw, ctrl, status;
3120
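	/*
	 * sc_tbi_anstate is a holdoff counter: wm_linkintr() sets it to
	 * 2 when link activity is seen, we count it down once per tick,
	 * and re-evaluate the link only when it reaches 1.  Zero means
	 * nothing is pending.
	 */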
3121 if (sc->sc_tbi_anstate == 0)
3122 return;
3123 else if (sc->sc_tbi_anstate > 1) {
3124 DPRINTF(WM_DEBUG_LINK,
3125 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3126 sc->sc_tbi_anstate));
3127 sc->sc_tbi_anstate--;
3128 return;
3129 }
3130
3131 sc->sc_tbi_anstate = 0;
3132
3133 rxcw = CSR_READ(sc, WMREG_RXCW);
3134 ctrl = CSR_READ(sc, WMREG_CTRL);
3135 status = CSR_READ(sc, WMREG_STATUS);
3136
3137 if ((status & STATUS_LU) == 0) {
3138 DPRINTF(WM_DEBUG_LINK,
3139 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3140 sc->sc_tbi_linkup = 0;
3141 } else {
3142 DPRINTF(WM_DEBUG_LINK,
3143 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3144 (status & STATUS_FD) ? "FDX" : "HDX"));
3145 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3146 if (status & STATUS_FD)
3147 sc->sc_tctl |=
3148 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3149 else
3150 sc->sc_tctl |=
3151 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3152 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3153 sc->sc_tbi_linkup = 1;
3154 }
3155
3156 wm_tbi_set_linkled(sc);
3157 }
3158
3159 /*
3160 * wm_gmii_reset:
3161 *
3162 * Reset the PHY.
3163 */
3164 static void
3165 wm_gmii_reset(struct wm_softc *sc)
3166 {
3167 uint32_t reg;
3168
3169 if (sc->sc_type >= WM_T_82544) {
3170 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3171 delay(20000);
3172
3173 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3174 delay(20000);
3175 } else {
3176 /* The PHY reset pin is active-low. */
3177 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3178 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3179 CTRL_EXT_SWDPIN(4));
3180 reg |= CTRL_EXT_SWDPIO(4);
3181
3182 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3183 delay(10);
3184
3185 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3186 delay(10);
3187
3188 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3189 delay(10);
3190 #if 0
3191 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3192 #endif
3193 }
3194 }
3195
3196 /*
3197 * wm_gmii_mediainit:
3198 *
3199 * Initialize media for use on 1000BASE-T devices.
3200 */
3201 static void
3202 wm_gmii_mediainit(struct wm_softc *sc)
3203 {
3204 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3205
3206 /* We have MII. */
3207 sc->sc_flags |= WM_F_HAS_MII;
3208
3209 sc->sc_tipg = TIPG_1000T_DFLT;
3210
3211 /*
3212 * Let the chip set speed/duplex on its own based on
3213 * signals from the PHY.
3214 */
3215 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3216 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3217
3218 /* Initialize our media structures and probe the GMII. */
3219 sc->sc_mii.mii_ifp = ifp;
3220
3221 if (sc->sc_type >= WM_T_82544) {
3222 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3223 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3224 } else {
3225 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3226 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3227 }
3228 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3229
3230 wm_gmii_reset(sc);
3231
3232 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3233 wm_gmii_mediastatus);
3234
3235 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3236 MII_OFFSET_ANY, 0);
3237 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3238 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3239 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3240 } else
3241 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3242 }
3243
3244 /*
3245 * wm_gmii_mediastatus: [ifmedia interface function]
3246 *
3247 * Get the current interface media status on a 1000BASE-T device.
3248 */
3249 static void
3250 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3251 {
3252 struct wm_softc *sc = ifp->if_softc;
3253
3254 mii_pollstat(&sc->sc_mii);
3255 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3256 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3257 }
3258
3259 /*
3260 * wm_gmii_mediachange: [ifmedia interface function]
3261 *
3262 * Set hardware to newly-selected media on a 1000BASE-T device.
3263 */
3264 static int
3265 wm_gmii_mediachange(struct ifnet *ifp)
3266 {
3267 struct wm_softc *sc = ifp->if_softc;
3268
3269 if (ifp->if_flags & IFF_UP)
3270 mii_mediachg(&sc->sc_mii);
3271 return (0);
3272 }
3273
3274 #define MDI_IO CTRL_SWDPIN(2)
3275 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3276 #define MDI_CLK CTRL_SWDPIN(3)
3277
3278 static void
3279 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3280 {
3281 uint32_t i, v;
3282
3283 v = CSR_READ(sc, WMREG_CTRL);
3284 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3285 v |= MDI_DIR | CTRL_SWDPIO(3);
3286
3287 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3288 if (data & i)
3289 v |= MDI_IO;
3290 else
3291 v &= ~MDI_IO;
3292 CSR_WRITE(sc, WMREG_CTRL, v);
3293 delay(10);
3294 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3295 delay(10);
3296 CSR_WRITE(sc, WMREG_CTRL, v);
3297 delay(10);
3298 }
3299 }
3300
3301 static uint32_t
3302 i82543_mii_recvbits(struct wm_softc *sc)
3303 {
3304 uint32_t v, i, data = 0;
3305
3306 v = CSR_READ(sc, WMREG_CTRL);
3307 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3308 v |= CTRL_SWDPIO(3);
3309
3310 CSR_WRITE(sc, WMREG_CTRL, v);
3311 delay(10);
3312 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3313 delay(10);
3314 CSR_WRITE(sc, WMREG_CTRL, v);
3315 delay(10);
3316
3317 for (i = 0; i < 16; i++) {
3318 data <<= 1;
3319 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3320 delay(10);
3321 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3322 data |= 1;
3323 CSR_WRITE(sc, WMREG_CTRL, v);
3324 delay(10);
3325 }
3326
3327 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3328 delay(10);
3329 CSR_WRITE(sc, WMREG_CTRL, v);
3330 delay(10);
3331
3332 return (data);
3333 }
3334
3335 #undef MDI_IO
3336 #undef MDI_DIR
3337 #undef MDI_CLK
3338
3339 /*
3340 * wm_gmii_i82543_readreg: [mii interface function]
3341 *
3342 * Read a PHY register on the GMII (i82543 version).
3343 */
3344 static int
3345 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3346 {
3347 struct wm_softc *sc = (void *) self;
3348 int rv;
3349
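	/* The 32 one-bits form the IEEE 802.3 clause 22 frame preamble. */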
3350 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3351 i82543_mii_sendbits(sc, reg | (phy << 5) |
3352 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3353 rv = i82543_mii_recvbits(sc) & 0xffff;
3354
3355 DPRINTF(WM_DEBUG_GMII,
3356 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3357 sc->sc_dev.dv_xname, phy, reg, rv));
3358
3359 return (rv);
3360 }
3361
3362 /*
3363 * wm_gmii_i82543_writereg: [mii interface function]
3364 *
3365 * Write a PHY register on the GMII (i82543 version).
3366 */
3367 static void
3368 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3369 {
3370 struct wm_softc *sc = (void *) self;
3371
3372 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3373 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3374 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3375 (MII_COMMAND_START << 30), 32);
3376 }
3377
3378 /*
3379 * wm_gmii_i82544_readreg: [mii interface function]
3380 *
3381 * Read a PHY register on the GMII.
3382 */
3383 static int
3384 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3385 {
3386 struct wm_softc *sc = (void *) self;
3387 uint32_t mdic = 0;
3388 int i, rv;
3389
3390 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3391 MDIC_REGADD(reg));
3392
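	/* Wait for the MDI access to complete: up to 100 * 10us = 1ms. */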
3393 for (i = 0; i < 100; i++) {
3394 mdic = CSR_READ(sc, WMREG_MDIC);
3395 if (mdic & MDIC_READY)
3396 break;
3397 delay(10);
3398 }
3399
3400 if ((mdic & MDIC_READY) == 0) {
3401 printf("%s: MDIC read timed out: phy %d reg %d\n",
3402 sc->sc_dev.dv_xname, phy, reg);
3403 rv = 0;
3404 } else if (mdic & MDIC_E) {
3405 #if 0 /* This is normal if no PHY is present. */
3406 printf("%s: MDIC read error: phy %d reg %d\n",
3407 sc->sc_dev.dv_xname, phy, reg);
3408 #endif
3409 rv = 0;
3410 } else {
3411 rv = MDIC_DATA(mdic);
3412 if (rv == 0xffff)
3413 rv = 0;
3414 }
3415
3416 return (rv);
3417 }
3418
3419 /*
3420 * wm_gmii_i82544_writereg: [mii interface function]
3421 *
3422 * Write a PHY register on the GMII.
3423 */
3424 static void
3425 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3426 {
3427 struct wm_softc *sc = (void *) self;
3428 uint32_t mdic = 0;
3429 int i;
3430
3431 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3432 MDIC_REGADD(reg) | MDIC_DATA(val));
3433
3434 for (i = 0; i < 100; i++) {
3435 mdic = CSR_READ(sc, WMREG_MDIC);
3436 if (mdic & MDIC_READY)
3437 break;
3438 delay(10);
3439 }
3440
3441 if ((mdic & MDIC_READY) == 0)
3442 printf("%s: MDIC write timed out: phy %d reg %d\n",
3443 sc->sc_dev.dv_xname, phy, reg);
3444 else if (mdic & MDIC_E)
3445 printf("%s: MDIC write error: phy %d reg %d\n",
3446 sc->sc_dev.dv_xname, phy, reg);
3447 }
3448
3449 /*
3450 * wm_gmii_statchg: [mii interface function]
3451 *
3452 * Callback from MII layer when media changes.
3453 */
3454 static void
3455 wm_gmii_statchg(struct device *self)
3456 {
3457 struct wm_softc *sc = (void *) self;
3458
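	/*
	 * Collisions cannot occur on a full-duplex link, so a shorter
	 * collision distance (COLD) suffices there; half-duplex needs
	 * the full value.
	 */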
3459 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3460
3461 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3462 DPRINTF(WM_DEBUG_LINK,
3463 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3464 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3465 } else {
3466 DPRINTF(WM_DEBUG_LINK,
3467 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3468 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3469 }
3470
3471 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3472 }
3473