/*	$NetBSD: if_wm.c,v 1.57 2003/10/22 15:50:39 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.57 2003/10/22 15:50:39 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
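/*
 * Note: the ring and queue sizes above must stay powers of two for
 * these index masks to work; e.g. WM_NEXTTX(255) == (255 + 1) & 0xff
 * == 0, which wraps the Tx ring back to descriptor 0.
 */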

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
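/*
 * These offsetof()-based macros give the byte offset of a single
 * descriptor within the control-data clump, so individual descriptors
 * can be synced or addressed without touching the whole DMA segment.
 */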

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
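/*
 * sc_rxtailp always points at the m_next field of the last mbuf in
 * the chain (or at sc_rxhead when the chain is empty), so linking a
 * new buffer is a single store with no list walk.
 */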

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
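/*
 * For example, WM_CDTXSYNC(sc, 250, 10, ops) on the 256-entry ring
 * issues two syncs: one for descriptors 250..255 and one for
 * descriptors 0..3.
 */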

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
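/*
 * The final CSR_WRITE hands the freshly initialized descriptor back
 * to the chip by advancing the Rx Descriptor Tail (RDT) register to
 * this slot.
 */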

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
#if 0	/* not yet... */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
#endif /* not yet... */
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
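/*
 * As the two helpers above show, the I/O BAR is a two-register
 * indirection window: the target register offset is written at I/O
 * offset 0, and the data is then read or written at I/O offset 4.
 */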

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
		    0, &sc->sc_iot, &sc->sc_ioh,
		    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
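				/*
				 * Both fields use the PCI-X encoding
				 * 512 << n (0 -> 512, 1 -> 1024,
				 * 2 -> 2048, 3 -> 4096 bytes), which
				 * is how the message below decodes
				 * them.
				 */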
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (STATUS_PCIXSPD(reg)) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
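
	/*
	 * The address width determines the device size: an n-bit
	 * address selects one of 2^n 16-bit words, which is what the
	 * aprint below reports.
	 */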
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
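	/*
	 * Each 16-bit EEPROM word stores two address bytes, low byte
	 * first, so the three words unpack into the six-byte Ethernet
	 * address.
	 */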
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
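
/*
 * Note that a context descriptor is consumed only on a cache miss;
 * back-to-back packets with identical IPCS/TUCS layouts reuse the
 * context already programmed into the chip.
 */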

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}
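	/*
	 * The <= 2 check above mirrors the two descriptors reserved
	 * in the loop: one for the TDT register semantics and one for
	 * a possible checksum context reload.
	 */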

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
1995 * for us. Associate the tag with the packet.
1996 */
1997 if (sc->sc_ethercom.ec_nvlans != 0 &&
1998 (status & WRX_ST_VP) != 0) {
1999 struct m_tag *vtag;
2000
2001 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2002 M_NOWAIT);
2003 if (vtag == NULL) {
2004 ifp->if_ierrors++;
2005 printf("%s: unable to allocate VLAN tag\n",
2006 sc->sc_dev.dv_xname);
2007 m_freem(m);
2008 continue;
2009 }
2010
2011 *(u_int *)(vtag + 1) =
2012 le16toh(sc->sc_rxdescs[i].wrx_special);
2013 }
2014 #endif /* XXXJRT */
2015
2016 /*
2017 * Set up checksum info for this packet.
2018 */
2019 if (status & WRX_ST_IPCS) {
2020 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2021 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2022 if (errors & WRX_ER_IPE)
2023 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2024 }
2025 if (status & WRX_ST_TCPCS) {
2026 /*
2027 * Note: we don't know if this was TCP or UDP,
2028 * so we just set both bits, and expect the
2029 * upper layers to deal.
2030 */
2031 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2032 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2033 if (errors & WRX_ER_TCPE)
2034 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2035 }
2036
2037 ifp->if_ipackets++;
2038
2039 #if NBPFILTER > 0
2040 /* Pass this up to any BPF listeners. */
2041 if (ifp->if_bpf)
2042 bpf_mtap(ifp->if_bpf, m);
2043 #endif /* NBPFILTER > 0 */
2044
2045 /* Pass it on. */
2046 (*ifp->if_input)(ifp, m);
2047 }
2048
2049 /* Update the receive pointer. */
2050 sc->sc_rxptr = i;
2051
2052 DPRINTF(WM_DEBUG_RX,
2053 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2054 }
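
/*
 * The WM_RXCHAIN_LINK()/WM_RXCHAIN_RESET() macros used above are
 * defined earlier in this file; they implement a standard tail-pointer
 * append, so multi-descriptor packets build an mbuf chain in O(1) per
 * fragment.  A sketch of the idiom, assuming sc_rxtailp points at the
 * m_next slot of the last mbuf linked:
 */
#if 0
	*sc->sc_rxtailp = m;		/* append this fragment */
	sc->sc_rxtailp = &m->m_next;	/* new tail slot */
#endif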
2055
2056 /*
2057 * wm_linkintr:
2058 *
2059 * Helper; handle link interrupts.
2060 */
2061 static void
2062 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2063 {
2064 uint32_t status;
2065
2066 /*
2067 * If we get a link status interrupt on a 1000BASE-T
2068 * device, just fall into the normal MII tick path.
2069 */
2070 if (sc->sc_flags & WM_F_HAS_MII) {
2071 if (icr & ICR_LSC) {
2072 DPRINTF(WM_DEBUG_LINK,
2073 ("%s: LINK: LSC -> mii_tick\n",
2074 sc->sc_dev.dv_xname));
2075 mii_tick(&sc->sc_mii);
2076 } else if (icr & ICR_RXSEQ) {
2077 DPRINTF(WM_DEBUG_LINK,
2078 			    ("%s: LINK: Receive sequence error\n",
2079 sc->sc_dev.dv_xname));
2080 }
2081 return;
2082 }
2083
2084 /*
2085 * If we are now receiving /C/, check for link again in
2086 * a couple of link clock ticks.
2087 */
2088 if (icr & ICR_RXCFG) {
2089 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2090 sc->sc_dev.dv_xname));
2091 sc->sc_tbi_anstate = 2;
2092 }
2093
2094 if (icr & ICR_LSC) {
2095 status = CSR_READ(sc, WMREG_STATUS);
2096 if (status & STATUS_LU) {
2097 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2098 sc->sc_dev.dv_xname,
2099 (status & STATUS_FD) ? "FDX" : "HDX"));
2100 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2101 if (status & STATUS_FD)
2102 sc->sc_tctl |=
2103 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2104 else
2105 sc->sc_tctl |=
2106 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2107 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2108 sc->sc_tbi_linkup = 1;
2109 } else {
2110 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2111 sc->sc_dev.dv_xname));
2112 sc->sc_tbi_linkup = 0;
2113 }
2114 sc->sc_tbi_anstate = 2;
2115 wm_tbi_set_linkled(sc);
2116 } else if (icr & ICR_RXSEQ) {
2117 DPRINTF(WM_DEBUG_LINK,
2118 ("%s: LINK: Receive sequence error\n",
2119 sc->sc_dev.dv_xname));
2120 }
2121 }
2122
2123 /*
2124 * wm_tick:
2125 *
2126  *	One second timer, used to check the link status and update
2127  *	the MII/TBI state.
2128 */
2129 static void
2130 wm_tick(void *arg)
2131 {
2132 struct wm_softc *sc = arg;
2133 int s;
2134
2135 s = splnet();
2136
2137 if (sc->sc_flags & WM_F_HAS_MII)
2138 mii_tick(&sc->sc_mii);
2139 else
2140 wm_tbi_check_link(sc);
2141
2142 splx(s);
2143
2144 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2145 }
2146
2147 /*
2148 * wm_reset:
2149 *
2150  *	Reset the i8254x chip.
2151 */
2152 static void
2153 wm_reset(struct wm_softc *sc)
2154 {
2155 int i;
2156
2157 switch (sc->sc_type) {
2158 case WM_T_82544:
2159 case WM_T_82540:
2160 case WM_T_82545:
2161 case WM_T_82546:
2162 case WM_T_82541:
2163 case WM_T_82541_2:
2164 /*
2165 * These chips have a problem with the memory-mapped
2166 * write cycle when issuing the reset, so use I/O-mapped
2167 * access, if possible.
2168 */
2169 if (sc->sc_flags & WM_F_IOH_VALID)
2170 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2171 else
2172 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2173 break;
2174
2175 case WM_T_82545_3:
2176 case WM_T_82546_3:
2177 /* Use the shadow control register on these chips. */
2178 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2179 break;
2180
2181 default:
2182 /* Everything else can safely use the documented method. */
2183 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2184 break;
2185 }
2186 delay(10000);
2187
2188 for (i = 0; i < 1000; i++) {
2189 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2190 return;
2191 delay(20);
2192 }
2193
2194 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2195 printf("%s: WARNING: reset failed to complete\n",
2196 sc->sc_dev.dv_xname);
2197 }
2198
2199 /*
2200 * wm_init: [ifnet interface function]
2201 *
2202 * Initialize the interface. Must be called at splnet().
2203 */
2204 static int
2205 wm_init(struct ifnet *ifp)
2206 {
2207 struct wm_softc *sc = ifp->if_softc;
2208 struct wm_rxsoft *rxs;
2209 int i, error = 0;
2210 uint32_t reg;
2211
2212 /*
2213 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2214 	 * There is a small but measurable benefit to avoiding the adjustment
2215 	 * of the descriptor so that the headers are aligned, for normal MTU,
2216 * on such platforms. One possibility is that the DMA itself is
2217 * slightly more efficient if the front of the entire packet (instead
2218 * of the front of the headers) is aligned.
2219 *
2220 * Note we must always set align_tweak to 0 if we are using
2221 * jumbo frames.
2222 */
2223 #ifdef __NO_STRICT_ALIGNMENT
2224 sc->sc_align_tweak = 0;
2225 #else
2226 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2227 sc->sc_align_tweak = 0;
2228 else
2229 sc->sc_align_tweak = 2;
2230 #endif /* __NO_STRICT_ALIGNMENT */
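
/*
 * Worked example of the 2-byte tweak: an Ethernet header is 14 bytes,
 * so with a receive buffer that starts 4-byte aligned the IP header
 * would land on a 2-byte boundary.  Starting the packet 2 bytes into
 * the buffer puts the IP header at offset 14 + 2 = 16, which is 4-byte
 * aligned, as strict-alignment platforms require.  A jumbo frame needs
 * more than MCLBYTES - 2 bytes of the cluster, so the tweak must be 0
 * in that case.
 */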
2231
2232 /* Cancel any pending I/O. */
2233 wm_stop(ifp, 0);
2234
2235 /* Reset the chip to a known state. */
2236 wm_reset(sc);
2237
2238 /* Initialize the transmit descriptor ring. */
2239 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2240 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2241 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2242 sc->sc_txfree = WM_NTXDESC;
2243 sc->sc_txnext = 0;
2244
2245 sc->sc_txctx_ipcs = 0xffffffff;
2246 sc->sc_txctx_tucs = 0xffffffff;
2247
2248 if (sc->sc_type < WM_T_82543) {
2249 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2250 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2251 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2252 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2253 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2254 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2255 } else {
2256 CSR_WRITE(sc, WMREG_TBDAH, 0);
2257 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2258 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2259 CSR_WRITE(sc, WMREG_TDH, 0);
2260 CSR_WRITE(sc, WMREG_TDT, 0);
2261 CSR_WRITE(sc, WMREG_TIDV, 128);
2262
2263 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2264 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2265 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2266 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2267 }
2268 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2269 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2270
2271 /* Initialize the transmit job descriptors. */
2272 for (i = 0; i < WM_TXQUEUELEN; i++)
2273 sc->sc_txsoft[i].txs_mbuf = NULL;
2274 sc->sc_txsfree = WM_TXQUEUELEN;
2275 sc->sc_txsnext = 0;
2276 sc->sc_txsdirty = 0;
2277
2278 /*
2279 * Initialize the receive descriptor and receive job
2280 * descriptor rings.
2281 */
2282 if (sc->sc_type < WM_T_82543) {
2283 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2284 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2285 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2286 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2287 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2288 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2289
2290 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2291 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2292 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2293 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2294 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2295 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2296 } else {
2297 CSR_WRITE(sc, WMREG_RDBAH, 0);
2298 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2299 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2300 CSR_WRITE(sc, WMREG_RDH, 0);
2301 CSR_WRITE(sc, WMREG_RDT, 0);
2302 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2303 }
2304 for (i = 0; i < WM_NRXDESC; i++) {
2305 rxs = &sc->sc_rxsoft[i];
2306 if (rxs->rxs_mbuf == NULL) {
2307 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2308 printf("%s: unable to allocate or map rx "
2309 "buffer %d, error = %d\n",
2310 sc->sc_dev.dv_xname, i, error);
2311 /*
2312 * XXX Should attempt to run with fewer receive
2313 * XXX buffers instead of just failing.
2314 */
2315 wm_rxdrain(sc);
2316 goto out;
2317 }
2318 } else
2319 WM_INIT_RXDESC(sc, i);
2320 }
2321 sc->sc_rxptr = 0;
2322 sc->sc_rxdiscard = 0;
2323 WM_RXCHAIN_RESET(sc);
2324
2325 /*
2326 * Clear out the VLAN table -- we don't use it (yet).
2327 */
2328 CSR_WRITE(sc, WMREG_VET, 0);
2329 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2330 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2331
2332 /*
2333 * Set up flow-control parameters.
2334 *
2335 * XXX Values could probably stand some tuning.
2336 */
2337 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2338 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2339 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2340 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2341
2342 if (sc->sc_type < WM_T_82543) {
2343 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2344 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2345 } else {
2346 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2347 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2348 }
2349 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2350 }
2351
2352 #if 0 /* XXXJRT */
2353 /* Deal with VLAN enables. */
2354 if (sc->sc_ethercom.ec_nvlans != 0)
2355 sc->sc_ctrl |= CTRL_VME;
2356 else
2357 #endif /* XXXJRT */
2358 sc->sc_ctrl &= ~CTRL_VME;
2359
2360 /* Write the control registers. */
2361 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2362 #if 0
2363 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2364 #endif
2365
2366 /*
2367 * Set up checksum offload parameters.
2368 */
2369 reg = CSR_READ(sc, WMREG_RXCSUM);
2370 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2371 reg |= RXCSUM_IPOFL;
2372 else
2373 reg &= ~RXCSUM_IPOFL;
2374 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2375 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2376 else {
2377 reg &= ~RXCSUM_TUOFL;
2378 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2379 reg &= ~RXCSUM_IPOFL;
2380 }
2381 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2382
2383 /*
2384 * Set up the interrupt registers.
2385 */
2386 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2387 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2388 ICR_RXO | ICR_RXT0;
2389 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2390 sc->sc_icr |= ICR_RXCFG;
2391 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2392
2393 /* Set up the inter-packet gap. */
2394 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2395
2396 #if 0 /* XXXJRT */
2397 /* Set the VLAN ethernetype. */
2398 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2399 #endif
2400
2401 /*
2402 * Set up the transmit control register; we start out with
2403 	 * a collision distance suitable for FDX, but update it when
2404 * we resolve the media type.
2405 */
2406 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2407 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2408 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2409
2410 /* Set the media. */
2411 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2412
2413 /*
2414 * Set up the receive control register; we actually program
2415 * the register when we set the receive filter. Use multicast
2416 * address offset type 0.
2417 *
2418 * Only the i82544 has the ability to strip the incoming
2419 * CRC, so we don't enable that feature.
2420 */
2421 sc->sc_mchash_type = 0;
2422 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2423 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2424
2425 	if (MCLBYTES == 2048) {
2426 sc->sc_rctl |= RCTL_2k;
2427 } else {
2428 /*
2429 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2430 * XXX segments, dropping" -- why?
2431 */
2432 #if 0
2433 		if (sc->sc_type >= WM_T_82543) {
2434 			switch (MCLBYTES) {
2435 case 4096:
2436 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2437 break;
2438 case 8192:
2439 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2440 break;
2441 case 16384:
2442 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2443 break;
2444 default:
2445 panic("wm_init: MCLBYTES %d unsupported",
2446 MCLBYTES);
2447 break;
2448 }
2449 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2450 #else
2451 panic("wm_init: MCLBYTES > 2048 not supported.");
2452 #endif
2453 }
2454
2455 /* Set the receive filter. */
2456 wm_set_filter(sc);
2457
2458 /* Start the one second link check clock. */
2459 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2460
2461 /* ...all done! */
2462 ifp->if_flags |= IFF_RUNNING;
2463 ifp->if_flags &= ~IFF_OACTIVE;
2464
2465 out:
2466 if (error)
2467 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2468 return (error);
2469 }
2470
2471 /*
2472 * wm_rxdrain:
2473 *
2474 * Drain the receive queue.
2475 */
2476 static void
2477 wm_rxdrain(struct wm_softc *sc)
2478 {
2479 struct wm_rxsoft *rxs;
2480 int i;
2481
2482 for (i = 0; i < WM_NRXDESC; i++) {
2483 rxs = &sc->sc_rxsoft[i];
2484 if (rxs->rxs_mbuf != NULL) {
2485 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2486 m_freem(rxs->rxs_mbuf);
2487 rxs->rxs_mbuf = NULL;
2488 }
2489 }
2490 }
2491
2492 /*
2493 * wm_stop: [ifnet interface function]
2494 *
2495 * Stop transmission on the interface.
2496 */
2497 static void
2498 wm_stop(struct ifnet *ifp, int disable)
2499 {
2500 struct wm_softc *sc = ifp->if_softc;
2501 struct wm_txsoft *txs;
2502 int i;
2503
2504 /* Stop the one second clock. */
2505 callout_stop(&sc->sc_tick_ch);
2506
2507 if (sc->sc_flags & WM_F_HAS_MII) {
2508 /* Down the MII. */
2509 mii_down(&sc->sc_mii);
2510 }
2511
2512 /* Stop the transmit and receive processes. */
2513 CSR_WRITE(sc, WMREG_TCTL, 0);
2514 CSR_WRITE(sc, WMREG_RCTL, 0);
2515
2516 /* Release any queued transmit buffers. */
2517 for (i = 0; i < WM_TXQUEUELEN; i++) {
2518 txs = &sc->sc_txsoft[i];
2519 if (txs->txs_mbuf != NULL) {
2520 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2521 m_freem(txs->txs_mbuf);
2522 txs->txs_mbuf = NULL;
2523 }
2524 }
2525
2526 if (disable)
2527 wm_rxdrain(sc);
2528
2529 /* Mark the interface as down and cancel the watchdog timer. */
2530 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2531 ifp->if_timer = 0;
2532 }
2533
2534 /*
2535 * wm_acquire_eeprom:
2536 *
2537 * Perform the EEPROM handshake required on some chips.
2538 */
2539 static int
2540 wm_acquire_eeprom(struct wm_softc *sc)
2541 {
2542 uint32_t reg;
2543 int x;
2544
2545 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2546 reg = CSR_READ(sc, WMREG_EECD);
2547
2548 /* Request EEPROM access. */
2549 reg |= EECD_EE_REQ;
2550 CSR_WRITE(sc, WMREG_EECD, reg);
2551
2552 		/* ... and wait for it to be granted. */
2553 for (x = 0; x < 100; x++) {
2554 reg = CSR_READ(sc, WMREG_EECD);
2555 if (reg & EECD_EE_GNT)
2556 break;
2557 delay(5);
2558 }
2559 if ((reg & EECD_EE_GNT) == 0) {
2560 aprint_error("%s: could not acquire EEPROM GNT\n",
2561 sc->sc_dev.dv_xname);
2562 reg &= ~EECD_EE_REQ;
2563 CSR_WRITE(sc, WMREG_EECD, reg);
2564 return (1);
2565 }
2566 }
2567
2568 return (0);
2569 }
2570
2571 /*
2572 * wm_release_eeprom:
2573 *
2574 * Release the EEPROM mutex.
2575 */
2576 static void
2577 wm_release_eeprom(struct wm_softc *sc)
2578 {
2579 uint32_t reg;
2580
2581 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2582 reg = CSR_READ(sc, WMREG_EECD);
2583 reg &= ~EECD_EE_REQ;
2584 CSR_WRITE(sc, WMREG_EECD, reg);
2585 }
2586 }
2587
2588 /*
2589 * wm_eeprom_sendbits:
2590 *
2591 * Send a series of bits to the EEPROM.
2592 */
2593 static void
2594 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2595 {
2596 uint32_t reg;
2597 int x;
2598
2599 reg = CSR_READ(sc, WMREG_EECD);
2600
2601 for (x = nbits; x > 0; x--) {
2602 if (bits & (1U << (x - 1)))
2603 reg |= EECD_DI;
2604 else
2605 reg &= ~EECD_DI;
2606 CSR_WRITE(sc, WMREG_EECD, reg);
2607 delay(2);
2608 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2609 delay(2);
2610 CSR_WRITE(sc, WMREG_EECD, reg);
2611 delay(2);
2612 }
2613 }
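
/*
 * Example: shifting in the 3-bit MicroWire READ opcode (110 binary,
 * assuming the usual UWIRE_OPC_READ encoding) drives, MSB first:
 * DI=1/clock, DI=1/clock, DI=0/clock.  The EEPROM samples DI on each
 * rising edge of SK, which is why SK is pulsed high and back low
 * around every bit above.
 */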
2614
2615 /*
2616 * wm_eeprom_recvbits:
2617 *
2618 * Receive a series of bits from the EEPROM.
2619 */
2620 static void
2621 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2622 {
2623 uint32_t reg, val;
2624 int x;
2625
2626 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2627
2628 val = 0;
2629 for (x = nbits; x > 0; x--) {
2630 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2631 delay(2);
2632 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2633 val |= (1U << (x - 1));
2634 CSR_WRITE(sc, WMREG_EECD, reg);
2635 delay(2);
2636 }
2637 *valp = val;
2638 }
2639
2640 /*
2641 * wm_read_eeprom_uwire:
2642 *
2643 * Read a word from the EEPROM using the MicroWire protocol.
2644 */
2645 static int
2646 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2647 {
2648 uint32_t reg, val;
2649 int i;
2650
2651 for (i = 0; i < wordcnt; i++) {
2652 /* Clear SK and DI. */
2653 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2654 CSR_WRITE(sc, WMREG_EECD, reg);
2655
2656 /* Set CHIP SELECT. */
2657 reg |= EECD_CS;
2658 CSR_WRITE(sc, WMREG_EECD, reg);
2659 delay(2);
2660
2661 /* Shift in the READ command. */
2662 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2663
2664 /* Shift in address. */
2665 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2666
2667 /* Shift out the data. */
2668 wm_eeprom_recvbits(sc, &val, 16);
2669 data[i] = val & 0xffff;
2670
2671 /* Clear CHIP SELECT. */
2672 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2673 CSR_WRITE(sc, WMREG_EECD, reg);
2674 delay(2);
2675 }
2676
2677 return (0);
2678 }
2679
2680 /*
2681 * wm_spi_eeprom_ready:
2682 *
2683 * Wait for a SPI EEPROM to be ready for commands.
2684 */
2685 static int
2686 wm_spi_eeprom_ready(struct wm_softc *sc)
2687 {
2688 uint32_t val;
2689 int usec;
2690
2691 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2692 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2693 wm_eeprom_recvbits(sc, &val, 8);
2694 if ((val & SPI_SR_RDY) == 0)
2695 break;
2696 }
2697 if (usec >= SPI_MAX_RETRIES) {
2698 aprint_error("%s: EEPROM failed to become ready\n",
2699 sc->sc_dev.dv_xname);
2700 return (1);
2701 }
2702 return (0);
2703 }
2704
2705 /*
2706 * wm_read_eeprom_spi:
2707 *
2708  *	Read a word from the EEPROM using the SPI protocol.
2709 */
2710 static int
2711 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2712 {
2713 uint32_t reg, val;
2714 int i;
2715 uint8_t opc;
2716
2717 /* Clear SK and CS. */
2718 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2719 CSR_WRITE(sc, WMREG_EECD, reg);
2720 delay(2);
2721
2722 if (wm_spi_eeprom_ready(sc))
2723 return (1);
2724
2725 /* Toggle CS to flush commands. */
2726 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2727 delay(2);
2728 CSR_WRITE(sc, WMREG_EECD, reg);
2729 delay(2);
2730
2731 opc = SPI_OPC_READ;
2732 if (sc->sc_ee_addrbits == 8 && word >= 128)
2733 opc |= SPI_OPC_A8;
2734
2735 wm_eeprom_sendbits(sc, opc, 8);
2736 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2737
2738 for (i = 0; i < wordcnt; i++) {
2739 wm_eeprom_recvbits(sc, &val, 16);
2740 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2741 }
2742
2743 /* Raise CS and clear SK. */
2744 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2745 CSR_WRITE(sc, WMREG_EECD, reg);
2746 delay(2);
2747
2748 return (0);
2749 }
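
/*
 * The swap above reorders the two bytes of each word as it comes off
 * the SPI wire into the 16-bit layout the rest of the driver expects:
 * a value clocked in as 0x1234 is stored in data[i] as 0x3412.
 */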
2750
2751 /*
2752 * wm_read_eeprom:
2753 *
2754 * Read data from the serial EEPROM.
2755 */
2756 static int
2757 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2758 {
2759 int rv;
2760
2761 if (wm_acquire_eeprom(sc))
2762 return (1);
2763
2764 if (sc->sc_flags & WM_F_EEPROM_SPI)
2765 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2766 else
2767 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2768
2769 wm_release_eeprom(sc);
2770 return (rv);
2771 }
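
/*
 * Typical use (a sketch mirroring the attach path earlier in this
 * file; EEPROM_OFF_MACADDR is assumed to be the station-address word
 * offset from if_wmreg.h):
 */
#if 0
	uint16_t myea[ETHER_ADDR_LEN / 2];

	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		/* handle EEPROM read failure */
	}
#endif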
2772
2773 /*
2774 * wm_add_rxbuf:
2775 *
2776  *	Add a receive buffer to the indicated descriptor.
2777 */
2778 static int
2779 wm_add_rxbuf(struct wm_softc *sc, int idx)
2780 {
2781 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2782 struct mbuf *m;
2783 int error;
2784
2785 MGETHDR(m, M_DONTWAIT, MT_DATA);
2786 if (m == NULL)
2787 return (ENOBUFS);
2788
2789 MCLGET(m, M_DONTWAIT);
2790 if ((m->m_flags & M_EXT) == 0) {
2791 m_freem(m);
2792 return (ENOBUFS);
2793 }
2794
2795 if (rxs->rxs_mbuf != NULL)
2796 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2797
2798 rxs->rxs_mbuf = m;
2799
2800 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2801 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2802 BUS_DMA_READ|BUS_DMA_NOWAIT);
2803 if (error) {
2804 printf("%s: unable to load rx DMA map %d, error = %d\n",
2805 sc->sc_dev.dv_xname, idx, error);
2806 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2807 }
2808
2809 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2810 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2811
2812 WM_INIT_RXDESC(sc, idx);
2813
2814 return (0);
2815 }
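
/*
 * Note the failure contract with wm_rxintr(): if mbuf allocation fails
 * the old rxs_mbuf is left loaded, so the caller simply re-inits the
 * descriptor and recycles the existing buffer, dropping the incoming
 * packet rather than shrinking the ring.
 */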
2816
2817 /*
2818 * wm_set_ral:
2819 *
2820  *	Set an entry in the receive address list.
2821 */
2822 static void
2823 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2824 {
2825 uint32_t ral_lo, ral_hi;
2826
2827 if (enaddr != NULL) {
2828 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2829 (enaddr[3] << 24);
2830 ral_hi = enaddr[4] | (enaddr[5] << 8);
2831 ral_hi |= RAL_AV;
2832 } else {
2833 ral_lo = 0;
2834 ral_hi = 0;
2835 }
2836
2837 if (sc->sc_type >= WM_T_82544) {
2838 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2839 ral_lo);
2840 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2841 ral_hi);
2842 } else {
2843 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2844 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2845 }
2846 }
2847
2848 /*
2849 * wm_mchash:
2850 *
2851 * Compute the hash of the multicast address for the 4096-bit
2852 * multicast filter.
2853 */
2854 static uint32_t
2855 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2856 {
2857 static const int lo_shift[4] = { 4, 3, 2, 0 };
2858 static const int hi_shift[4] = { 4, 5, 6, 8 };
2859 uint32_t hash;
2860
2861 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2862 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2863
2864 return (hash & 0xfff);
2865 }
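
/*
 * Worked example, filter type 0: for 01:00:5e:00:00:01 we have
 * enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 * hash = (0x00 >> 4) | (0x01 << 4) = 0x010.  wm_set_filter() below
 * then splits this into MTA word (0x010 >> 5) & 0x7f = 0 and bit
 * 0x010 & 0x1f = 16, i.e. bit 16 of the first multicast-table
 * register.
 */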
2866
2867 /*
2868 * wm_set_filter:
2869 *
2870 * Set up the receive filter.
2871 */
2872 static void
2873 wm_set_filter(struct wm_softc *sc)
2874 {
2875 struct ethercom *ec = &sc->sc_ethercom;
2876 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2877 struct ether_multi *enm;
2878 struct ether_multistep step;
2879 bus_addr_t mta_reg;
2880 uint32_t hash, reg, bit;
2881 int i;
2882
2883 if (sc->sc_type >= WM_T_82544)
2884 mta_reg = WMREG_CORDOVA_MTA;
2885 else
2886 mta_reg = WMREG_MTA;
2887
2888 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2889
2890 if (ifp->if_flags & IFF_BROADCAST)
2891 sc->sc_rctl |= RCTL_BAM;
2892 if (ifp->if_flags & IFF_PROMISC) {
2893 sc->sc_rctl |= RCTL_UPE;
2894 goto allmulti;
2895 }
2896
2897 /*
2898 * Set the station address in the first RAL slot, and
2899 * clear the remaining slots.
2900 */
2901 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2902 for (i = 1; i < WM_RAL_TABSIZE; i++)
2903 wm_set_ral(sc, NULL, i);
2904
2905 /* Clear out the multicast table. */
2906 for (i = 0; i < WM_MC_TABSIZE; i++)
2907 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2908
2909 ETHER_FIRST_MULTI(step, ec, enm);
2910 while (enm != NULL) {
2911 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2912 /*
2913 * We must listen to a range of multicast addresses.
2914 * For now, just accept all multicasts, rather than
2915 * trying to set only those filter bits needed to match
2916 * the range. (At this time, the only use of address
2917 * ranges is for IP multicast routing, for which the
2918 * range is big enough to require all bits set.)
2919 */
2920 goto allmulti;
2921 }
2922
2923 hash = wm_mchash(sc, enm->enm_addrlo);
2924
2925 reg = (hash >> 5) & 0x7f;
2926 bit = hash & 0x1f;
2927
2928 hash = CSR_READ(sc, mta_reg + (reg << 2));
2929 hash |= 1U << bit;
2930
2931 /* XXX Hardware bug?? */
2932 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2933 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2934 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2935 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2936 } else
2937 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2938
2939 ETHER_NEXT_MULTI(step, enm);
2940 }
2941
2942 ifp->if_flags &= ~IFF_ALLMULTI;
2943 goto setit;
2944
2945 allmulti:
2946 ifp->if_flags |= IFF_ALLMULTI;
2947 sc->sc_rctl |= RCTL_MPE;
2948
2949 setit:
2950 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2951 }
2952
2953 /*
2954 * wm_tbi_mediainit:
2955 *
2956 * Initialize media for use on 1000BASE-X devices.
2957 */
2958 static void
2959 wm_tbi_mediainit(struct wm_softc *sc)
2960 {
2961 const char *sep = "";
2962
2963 if (sc->sc_type < WM_T_82543)
2964 sc->sc_tipg = TIPG_WM_DFLT;
2965 else
2966 sc->sc_tipg = TIPG_LG_DFLT;
2967
2968 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2969 wm_tbi_mediastatus);
2970
2971 /*
2972 * SWD Pins:
2973 *
2974 * 0 = Link LED (output)
2975 * 1 = Loss Of Signal (input)
2976 */
2977 sc->sc_ctrl |= CTRL_SWDPIO(0);
2978 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2979
2980 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2981
2982 #define ADD(ss, mm, dd) \
2983 do { \
2984 printf("%s%s", sep, ss); \
2985 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2986 sep = ", "; \
2987 } while (/*CONSTCOND*/0)
2988
2989 printf("%s: ", sc->sc_dev.dv_xname);
2990 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2991 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2992 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2993 printf("\n");
2994
2995 #undef ADD
2996
2997 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2998 }
2999
3000 /*
3001 * wm_tbi_mediastatus: [ifmedia interface function]
3002 *
3003 * Get the current interface media status on a 1000BASE-X device.
3004 */
3005 static void
3006 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3007 {
3008 struct wm_softc *sc = ifp->if_softc;
3009
3010 ifmr->ifm_status = IFM_AVALID;
3011 ifmr->ifm_active = IFM_ETHER;
3012
3013 if (sc->sc_tbi_linkup == 0) {
3014 ifmr->ifm_active |= IFM_NONE;
3015 return;
3016 }
3017
3018 ifmr->ifm_status |= IFM_ACTIVE;
3019 ifmr->ifm_active |= IFM_1000_SX;
3020 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3021 ifmr->ifm_active |= IFM_FDX;
3022 }
3023
3024 /*
3025 * wm_tbi_mediachange: [ifmedia interface function]
3026 *
3027 * Set hardware to newly-selected media on a 1000BASE-X device.
3028 */
3029 static int
3030 wm_tbi_mediachange(struct ifnet *ifp)
3031 {
3032 struct wm_softc *sc = ifp->if_softc;
3033 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3034 uint32_t status;
3035 int i;
3036
3037 sc->sc_txcw = ife->ifm_data;
3038 if (sc->sc_ctrl & CTRL_RFCE)
3039 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
3040 if (sc->sc_ctrl & CTRL_TFCE)
3041 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
3042 sc->sc_txcw |= TXCW_ANE;
3043
3044 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3045 delay(10000);
3046
3047 sc->sc_tbi_anstate = 0;
3048
3049 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3050 /* Have signal; wait for the link to come up. */
3051 for (i = 0; i < 50; i++) {
3052 delay(10000);
3053 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3054 break;
3055 }
3056
3057 status = CSR_READ(sc, WMREG_STATUS);
3058 if (status & STATUS_LU) {
3059 /* Link is up. */
3060 DPRINTF(WM_DEBUG_LINK,
3061 ("%s: LINK: set media -> link up %s\n",
3062 sc->sc_dev.dv_xname,
3063 (status & STATUS_FD) ? "FDX" : "HDX"));
3064 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3065 if (status & STATUS_FD)
3066 sc->sc_tctl |=
3067 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3068 else
3069 sc->sc_tctl |=
3070 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3071 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3072 sc->sc_tbi_linkup = 1;
3073 } else {
3074 /* Link is down. */
3075 DPRINTF(WM_DEBUG_LINK,
3076 ("%s: LINK: set media -> link down\n",
3077 sc->sc_dev.dv_xname));
3078 sc->sc_tbi_linkup = 0;
3079 }
3080 } else {
3081 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3082 sc->sc_dev.dv_xname));
3083 sc->sc_tbi_linkup = 0;
3084 }
3085
3086 wm_tbi_set_linkled(sc);
3087
3088 return (0);
3089 }
3090
3091 /*
3092 * wm_tbi_set_linkled:
3093 *
3094 * Update the link LED on 1000BASE-X devices.
3095 */
3096 static void
3097 wm_tbi_set_linkled(struct wm_softc *sc)
3098 {
3099
3100 if (sc->sc_tbi_linkup)
3101 sc->sc_ctrl |= CTRL_SWDPIN(0);
3102 else
3103 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3104
3105 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3106 }
3107
3108 /*
3109 * wm_tbi_check_link:
3110 *
3111 * Check the link on 1000BASE-X devices.
3112 */
3113 static void
3114 wm_tbi_check_link(struct wm_softc *sc)
3115 {
3116 uint32_t rxcw, ctrl, status;
3117
3118 if (sc->sc_tbi_anstate == 0)
3119 return;
3120 else if (sc->sc_tbi_anstate > 1) {
3121 DPRINTF(WM_DEBUG_LINK,
3122 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3123 sc->sc_tbi_anstate));
3124 sc->sc_tbi_anstate--;
3125 return;
3126 }
3127
3128 sc->sc_tbi_anstate = 0;
3129
3130 rxcw = CSR_READ(sc, WMREG_RXCW);
3131 ctrl = CSR_READ(sc, WMREG_CTRL);
3132 status = CSR_READ(sc, WMREG_STATUS);
3133
3134 if ((status & STATUS_LU) == 0) {
3135 DPRINTF(WM_DEBUG_LINK,
3136 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3137 sc->sc_tbi_linkup = 0;
3138 } else {
3139 DPRINTF(WM_DEBUG_LINK,
3140 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3141 (status & STATUS_FD) ? "FDX" : "HDX"));
3142 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3143 if (status & STATUS_FD)
3144 sc->sc_tctl |=
3145 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3146 else
3147 sc->sc_tctl |=
3148 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3149 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3150 sc->sc_tbi_linkup = 1;
3151 }
3152
3153 wm_tbi_set_linkled(sc);
3154 }
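
/*
 * The collision-distance update above is repeated verbatim in
 * wm_linkintr() and wm_tbi_mediachange().  A hypothetical helper
 * (sketch only, not part of this driver) could centralize it:
 */
#if 0
static void
wm_set_coll_dist(struct wm_softc *sc, uint32_t status)
{

	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
	if (status & STATUS_FD)
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	else
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
}
#endif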
3155
3156 /*
3157 * wm_gmii_reset:
3158 *
3159 * Reset the PHY.
3160 */
3161 static void
3162 wm_gmii_reset(struct wm_softc *sc)
3163 {
3164 uint32_t reg;
3165
3166 if (sc->sc_type >= WM_T_82544) {
3167 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3168 delay(20000);
3169
3170 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3171 delay(20000);
3172 } else {
3173 /* The PHY reset pin is active-low. */
3174 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3175 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3176 CTRL_EXT_SWDPIN(4));
3177 reg |= CTRL_EXT_SWDPIO(4);
3178
3179 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3180 delay(10);
3181
3182 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3183 delay(10);
3184
3185 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3186 delay(10);
3187 #if 0
3188 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3189 #endif
3190 }
3191 }
3192
3193 /*
3194 * wm_gmii_mediainit:
3195 *
3196 * Initialize media for use on 1000BASE-T devices.
3197 */
3198 static void
3199 wm_gmii_mediainit(struct wm_softc *sc)
3200 {
3201 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3202
3203 /* We have MII. */
3204 sc->sc_flags |= WM_F_HAS_MII;
3205
3206 sc->sc_tipg = TIPG_1000T_DFLT;
3207
3208 /*
3209 * Let the chip set speed/duplex on its own based on
3210 * signals from the PHY.
3211 */
3212 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3213 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3214
3215 /* Initialize our media structures and probe the GMII. */
3216 sc->sc_mii.mii_ifp = ifp;
3217
3218 if (sc->sc_type >= WM_T_82544) {
3219 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3220 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3221 } else {
3222 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3223 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3224 }
3225 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3226
3227 wm_gmii_reset(sc);
3228
3229 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3230 wm_gmii_mediastatus);
3231
3232 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3233 MII_OFFSET_ANY, 0);
3234 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3235 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3236 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3237 } else
3238 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3239 }
3240
3241 /*
3242 * wm_gmii_mediastatus: [ifmedia interface function]
3243 *
3244 * Get the current interface media status on a 1000BASE-T device.
3245 */
3246 static void
3247 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3248 {
3249 struct wm_softc *sc = ifp->if_softc;
3250
3251 mii_pollstat(&sc->sc_mii);
3252 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3253 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3254 }
3255
3256 /*
3257 * wm_gmii_mediachange: [ifmedia interface function]
3258 *
3259 * Set hardware to newly-selected media on a 1000BASE-T device.
3260 */
3261 static int
3262 wm_gmii_mediachange(struct ifnet *ifp)
3263 {
3264 struct wm_softc *sc = ifp->if_softc;
3265
3266 if (ifp->if_flags & IFF_UP)
3267 mii_mediachg(&sc->sc_mii);
3268 return (0);
3269 }
3270
3271 #define MDI_IO CTRL_SWDPIN(2)
3272 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3273 #define MDI_CLK CTRL_SWDPIN(3)
3274
3275 static void
3276 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3277 {
3278 uint32_t i, v;
3279
3280 v = CSR_READ(sc, WMREG_CTRL);
3281 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3282 v |= MDI_DIR | CTRL_SWDPIO(3);
3283
3284 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3285 if (data & i)
3286 v |= MDI_IO;
3287 else
3288 v &= ~MDI_IO;
3289 CSR_WRITE(sc, WMREG_CTRL, v);
3290 delay(10);
3291 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3292 delay(10);
3293 CSR_WRITE(sc, WMREG_CTRL, v);
3294 delay(10);
3295 }
3296 }
3297
3298 static uint32_t
3299 i82543_mii_recvbits(struct wm_softc *sc)
3300 {
3301 uint32_t v, i, data = 0;
3302
3303 v = CSR_READ(sc, WMREG_CTRL);
3304 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3305 v |= CTRL_SWDPIO(3);
3306
3307 CSR_WRITE(sc, WMREG_CTRL, v);
3308 delay(10);
3309 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3310 delay(10);
3311 CSR_WRITE(sc, WMREG_CTRL, v);
3312 delay(10);
3313
3314 for (i = 0; i < 16; i++) {
3315 data <<= 1;
3316 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3317 delay(10);
3318 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3319 data |= 1;
3320 CSR_WRITE(sc, WMREG_CTRL, v);
3321 delay(10);
3322 }
3323
3324 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3325 delay(10);
3326 CSR_WRITE(sc, WMREG_CTRL, v);
3327 delay(10);
3328
3329 return (data);
3330 }
3331
3332 #undef MDI_IO
3333 #undef MDI_DIR
3334 #undef MDI_CLK
3335
3336 /*
3337 * wm_gmii_i82543_readreg: [mii interface function]
3338 *
3339 * Read a PHY register on the GMII (i82543 version).
3340 */
3341 static int
3342 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3343 {
3344 struct wm_softc *sc = (void *) self;
3345 int rv;
3346
3347 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3348 i82543_mii_sendbits(sc, reg | (phy << 5) |
3349 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3350 rv = i82543_mii_recvbits(sc) & 0xffff;
3351
3352 DPRINTF(WM_DEBUG_GMII,
3353 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3354 sc->sc_dev.dv_xname, phy, reg, rv));
3355
3356 return (rv);
3357 }
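
/*
 * The 14 bits shifted in above are the leading half of an IEEE 802.3
 * clause 22 management frame, MSB first: start (01), read opcode (10),
 * 5-bit PHY address, 5-bit register address.  The 32 one-bits sent
 * first form the preamble; the turnaround and the 16 data bits are
 * clocked by i82543_mii_recvbits().
 */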
3358
3359 /*
3360 * wm_gmii_i82543_writereg: [mii interface function]
3361 *
3362 * Write a PHY register on the GMII (i82543 version).
3363 */
3364 static void
3365 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3366 {
3367 struct wm_softc *sc = (void *) self;
3368
3369 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3370 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3371 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3372 (MII_COMMAND_START << 30), 32);
3373 }
3374
3375 /*
3376 * wm_gmii_i82544_readreg: [mii interface function]
3377 *
3378 * Read a PHY register on the GMII.
3379 */
3380 static int
3381 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3382 {
3383 struct wm_softc *sc = (void *) self;
3384 uint32_t mdic;
3385 int i, rv;
3386
3387 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3388 MDIC_REGADD(reg));
3389
3390 for (i = 0; i < 100; i++) {
3391 mdic = CSR_READ(sc, WMREG_MDIC);
3392 if (mdic & MDIC_READY)
3393 break;
3394 delay(10);
3395 }
3396
3397 if ((mdic & MDIC_READY) == 0) {
3398 printf("%s: MDIC read timed out: phy %d reg %d\n",
3399 sc->sc_dev.dv_xname, phy, reg);
3400 rv = 0;
3401 } else if (mdic & MDIC_E) {
3402 #if 0 /* This is normal if no PHY is present. */
3403 printf("%s: MDIC read error: phy %d reg %d\n",
3404 sc->sc_dev.dv_xname, phy, reg);
3405 #endif
3406 rv = 0;
3407 } else {
3408 rv = MDIC_DATA(mdic);
3409 if (rv == 0xffff)
3410 rv = 0;
3411 }
3412
3413 return (rv);
3414 }
3415
3416 /*
3417 * wm_gmii_i82544_writereg: [mii interface function]
3418 *
3419 * Write a PHY register on the GMII.
3420 */
3421 static void
3422 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3423 {
3424 struct wm_softc *sc = (void *) self;
3425 uint32_t mdic;
3426 int i;
3427
3428 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3429 MDIC_REGADD(reg) | MDIC_DATA(val));
3430
3431 for (i = 0; i < 100; i++) {
3432 mdic = CSR_READ(sc, WMREG_MDIC);
3433 if (mdic & MDIC_READY)
3434 break;
3435 delay(10);
3436 }
3437
3438 if ((mdic & MDIC_READY) == 0)
3439 printf("%s: MDIC write timed out: phy %d reg %d\n",
3440 sc->sc_dev.dv_xname, phy, reg);
3441 else if (mdic & MDIC_E)
3442 printf("%s: MDIC write error: phy %d reg %d\n",
3443 sc->sc_dev.dv_xname, phy, reg);
3444 }
3445
3446 /*
3447 * wm_gmii_statchg: [mii interface function]
3448 *
3449 * Callback from MII layer when media changes.
3450 */
3451 static void
3452 wm_gmii_statchg(struct device *self)
3453 {
3454 struct wm_softc *sc = (void *) self;
3455
3456 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3457
3458 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3459 DPRINTF(WM_DEBUG_LINK,
3460 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3461 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3462 } else {
3463 DPRINTF(WM_DEBUG_LINK,
3464 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3465 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3466 }
3467
3468 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3469 }
3470