1 /* $NetBSD: if_wm.c,v 1.54 2003/10/21 16:41:51 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Fix hw VLAN assist.
44 */
45
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.54 2003/10/21 16:41:51 thorpej Exp $");
48
49 #include "bpfilter.h"
50 #include "rnd.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/callout.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/kernel.h>
58 #include <sys/socket.h>
59 #include <sys/ioctl.h>
60 #include <sys/errno.h>
61 #include <sys/device.h>
62 #include <sys/queue.h>
63
64 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
65
66 #if NRND > 0
67 #include <sys/rnd.h>
68 #endif
69
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_ether.h>
74
75 #if NBPFILTER > 0
76 #include <net/bpf.h>
77 #endif
78
79 #include <netinet/in.h> /* XXX for struct ip */
80 #include <netinet/in_systm.h> /* XXX for struct ip */
81 #include <netinet/ip.h> /* XXX for struct ip */
82 #include <netinet/tcp.h> /* XXX for struct tcphdr */
83
84 #include <machine/bus.h>
85 #include <machine/intr.h>
86 #include <machine/endian.h>
87
88 #include <dev/mii/mii.h>
89 #include <dev/mii/miivar.h>
90 #include <dev/mii/mii_bitbang.h>
91
92 #include <dev/pci/pcireg.h>
93 #include <dev/pci/pcivar.h>
94 #include <dev/pci/pcidevs.h>
95
96 #include <dev/pci/if_wmreg.h>
97
98 #ifdef WM_DEBUG
99 #define WM_DEBUG_LINK 0x01
100 #define WM_DEBUG_TX 0x02
101 #define WM_DEBUG_RX 0x04
102 #define WM_DEBUG_GMII 0x08
103 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
104
105 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
106 #else
107 #define DPRINTF(x, y) /* nothing */
108 #endif /* WM_DEBUG */
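/*
 * Note that DPRINTF() takes its printf arguments pre-parenthesized.
 * Illustrative example only:
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX stalled\n", sc->sc_dev.dv_xname));
 */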
109
110 /*
111 * Transmit descriptor list size. Due to errata, we can only have
112 * 256 hardware descriptors in the ring. We tell the upper layers
113 * that they can queue a lot of packets, and we go ahead and manage
114 * up to 64 of them at a time. We allow up to 16 DMA segments per
115 * packet.
116 */
117 #define WM_NTXSEGS 16
118 #define WM_IFQUEUELEN 256
119 #define WM_TXQUEUELEN 64
120 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
121 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
122 #define WM_NTXDESC 256
123 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
124 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
125 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
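/*
 * Because WM_NTXDESC and WM_TXQUEUELEN are powers of two, the NEXT
 * macros wrap with a cheap AND rather than a modulo.  Illustrative
 * example: WM_NEXTTX(254) == 255 and WM_NEXTTX(255) == 0.
 */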
126
127 /*
128 * Receive descriptor list size. We have one Rx buffer for normal
129 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
130 * packet. We allocate 256 receive descriptors, each with a 2k
131 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
132 */
133 #define WM_NRXDESC 256
134 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
135 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
136 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
137
138 /*
139 * Control structures are DMA'd to the i82542 chip. We allocate them in
140  * a single clump that maps to a single DMA segment to make several things
141 * easier.
142 */
143 struct wm_control_data {
144 /*
145 * The transmit descriptors.
146 */
147 wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];
148
149 /*
150 * The receive descriptors.
151 */
152 wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
153 };
154
155 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
156 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
157 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
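/*
 * Illustrative example: since the Tx descriptors come first in the
 * clump, WM_CDTXOFF(0) == 0 and WM_CDRXOFF(0) == WM_NTXDESC *
 * sizeof(wiseman_txdesc_t).  Adding either offset to the clump's
 * single DMA segment base yields a descriptor's bus address.
 */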
158
159 /*
160 * Software state for transmit jobs.
161 */
162 struct wm_txsoft {
163 struct mbuf *txs_mbuf; /* head of our mbuf chain */
164 bus_dmamap_t txs_dmamap; /* our DMA map */
165 int txs_firstdesc; /* first descriptor in packet */
166 int txs_lastdesc; /* last descriptor in packet */
167 int txs_ndesc; /* # of descriptors used */
168 };
169
170 /*
171 * Software state for receive buffers. Each descriptor gets a
172 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
173 * more than one buffer, we chain them together.
174 */
175 struct wm_rxsoft {
176 struct mbuf *rxs_mbuf; /* head of our mbuf chain */
177 bus_dmamap_t rxs_dmamap; /* our DMA map */
178 };
179
180 typedef enum {
181 WM_T_unknown = 0,
182 WM_T_82542_2_0, /* i82542 2.0 (really old) */
183 WM_T_82542_2_1, /* i82542 2.1+ (old) */
184 WM_T_82543, /* i82543 */
185 WM_T_82544, /* i82544 */
186 WM_T_82540, /* i82540 */
187 WM_T_82545, /* i82545 */
188 WM_T_82545_3, /* i82545 3.0+ */
189 WM_T_82546, /* i82546 */
190 WM_T_82546_3, /* i82546 3.0+ */
191 WM_T_82541, /* i82541 */
192 WM_T_82541_2, /* i82541 2.0+ */
193 WM_T_82547, /* i82547 */
194 WM_T_82547_2, /* i82547 2.0+ */
195 } wm_chip_type;
196
197 /*
198 * Software state per device.
199 */
200 struct wm_softc {
201 struct device sc_dev; /* generic device information */
202 bus_space_tag_t sc_st; /* bus space tag */
203 bus_space_handle_t sc_sh; /* bus space handle */
204 bus_space_tag_t sc_iot; /* I/O space tag */
205 bus_space_handle_t sc_ioh; /* I/O space handle */
206 bus_dma_tag_t sc_dmat; /* bus DMA tag */
207 struct ethercom sc_ethercom; /* ethernet common data */
208 void *sc_sdhook; /* shutdown hook */
209
210 wm_chip_type sc_type; /* chip type */
211 int sc_flags; /* flags; see below */
212 int sc_bus_speed; /* PCI/PCIX bus speed */
213 int sc_pcix_offset; /* PCIX capability register offset */
214
215 void *sc_ih; /* interrupt cookie */
216
217 int sc_ee_addrbits; /* EEPROM address bits */
218
219 struct mii_data sc_mii; /* MII/media information */
220
221 struct callout sc_tick_ch; /* tick callout */
222
223 bus_dmamap_t sc_cddmamap; /* control data DMA map */
224 #define sc_cddma sc_cddmamap->dm_segs[0].ds_addr
225
226 int sc_align_tweak;
227
228 /*
229 * Software state for the transmit and receive descriptors.
230 */
231 struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
232 struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
233
234 /*
235 * Control data structures.
236 */
237 struct wm_control_data *sc_control_data;
238 #define sc_txdescs sc_control_data->wcd_txdescs
239 #define sc_rxdescs sc_control_data->wcd_rxdescs
240
241 #ifdef WM_EVENT_COUNTERS
242 /* Event counters. */
243 struct evcnt sc_ev_txsstall; /* Tx stalled due to no txs */
244 struct evcnt sc_ev_txdstall; /* Tx stalled due to no txd */
245 struct evcnt sc_ev_txforceintr; /* Tx interrupts forced */
246 struct evcnt sc_ev_txdw; /* Tx descriptor interrupts */
247 struct evcnt sc_ev_txqe; /* Tx queue empty interrupts */
248 struct evcnt sc_ev_rxintr; /* Rx interrupts */
249 struct evcnt sc_ev_linkintr; /* Link interrupts */
250
251 struct evcnt sc_ev_rxipsum; /* IP checksums checked in-bound */
252 struct evcnt sc_ev_rxtusum; /* TCP/UDP cksums checked in-bound */
253 struct evcnt sc_ev_txipsum; /* IP checksums comp. out-bound */
254 struct evcnt sc_ev_txtusum; /* TCP/UDP cksums comp. out-bound */
255
256 struct evcnt sc_ev_txctx_init; /* Tx cksum context cache initialized */
257 struct evcnt sc_ev_txctx_hit; /* Tx cksum context cache hit */
258 struct evcnt sc_ev_txctx_miss; /* Tx cksum context cache miss */
259
260 struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
261 struct evcnt sc_ev_txdrop; /* Tx packets dropped (too many segs) */
262
263 struct evcnt sc_ev_tu; /* Tx underrun */
264 #endif /* WM_EVENT_COUNTERS */
265
266 bus_addr_t sc_tdt_reg; /* offset of TDT register */
267
268 int sc_txfree; /* number of free Tx descriptors */
269 int sc_txnext; /* next ready Tx descriptor */
270
271 int sc_txsfree; /* number of free Tx jobs */
272 int sc_txsnext; /* next free Tx job */
273 int sc_txsdirty; /* dirty Tx jobs */
274
275 uint32_t sc_txctx_ipcs; /* cached Tx IP cksum ctx */
276 uint32_t sc_txctx_tucs; /* cached Tx TCP/UDP cksum ctx */
277
278 bus_addr_t sc_rdt_reg; /* offset of RDT register */
279
280 int sc_rxptr; /* next ready Rx descriptor/queue ent */
281 int sc_rxdiscard;
282 int sc_rxlen;
283 struct mbuf *sc_rxhead;
284 struct mbuf *sc_rxtail;
285 struct mbuf **sc_rxtailp;
286
287 uint32_t sc_ctrl; /* prototype CTRL register */
288 #if 0
289 uint32_t sc_ctrl_ext; /* prototype CTRL_EXT register */
290 #endif
291 uint32_t sc_icr; /* prototype interrupt bits */
292 uint32_t sc_tctl; /* prototype TCTL register */
293 uint32_t sc_rctl; /* prototype RCTL register */
294 uint32_t sc_txcw; /* prototype TXCW register */
295 uint32_t sc_tipg; /* prototype TIPG register */
296
297 int sc_tbi_linkup; /* TBI link status */
298 int sc_tbi_anstate; /* autonegotiation state */
299
300 int sc_mchash_type; /* multicast filter offset */
301
302 #if NRND > 0
303 rndsource_element_t rnd_source; /* random source */
304 #endif
305 };
306
307 #define WM_RXCHAIN_RESET(sc) \
308 do { \
309 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \
310 *(sc)->sc_rxtailp = NULL; \
311 (sc)->sc_rxlen = 0; \
312 } while (/*CONSTCOND*/0)
313
314 #define WM_RXCHAIN_LINK(sc, m) \
315 do { \
316 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
317 (sc)->sc_rxtailp = &(m)->m_next; \
318 } while (/*CONSTCOND*/0)
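/*
 * Sketch (illustrative only, not compiled) of how the Rx chain
 * macros reassemble a packet spanning several buffers; m1 and m2
 * are hypothetical mbufs:
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxtailp -> &sc_rxhead, len = 0 */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead = m1, tailp -> &m1->m_next */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2, tailp -> &m2->m_next */
#endif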
319
320 /* sc_flags */
321 #define WM_F_HAS_MII 0x01 /* has MII */
322 #define WM_F_EEPROM_HANDSHAKE 0x02 /* requires EEPROM handshake */
323 #define WM_F_IOH_VALID 0x10 /* I/O handle is valid */
324 #define WM_F_BUS64 0x20 /* bus is 64-bit */
325 #define WM_F_PCIX 0x40 /* bus is PCI-X */
326
327 #ifdef WM_EVENT_COUNTERS
328 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
329 #else
330 #define WM_EVCNT_INCR(ev) /* nothing */
331 #endif
332
333 #define CSR_READ(sc, reg) \
334 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
335 #define CSR_WRITE(sc, reg, val) \
336 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
337
338 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
339 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
340
341 #define WM_CDTXSYNC(sc, x, n, ops) \
342 do { \
343 int __x, __n; \
344 \
345 __x = (x); \
346 __n = (n); \
347 \
348 /* If it will wrap around, sync to the end of the ring. */ \
349 if ((__x + __n) > WM_NTXDESC) { \
350 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
351 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
352 (WM_NTXDESC - __x), (ops)); \
353 __n -= (WM_NTXDESC - __x); \
354 __x = 0; \
355 } \
356 \
357 /* Now sync whatever is left. */ \
358 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
359 WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
360 } while (/*CONSTCOND*/0)
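/*
 * Illustrative example of the wrap-around case above: syncing 4
 * descriptors starting at index 254 issues two bus_dmamap_sync()
 * calls, one covering descriptors 254..255 and one covering 0..1.
 */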
361
362 #define WM_CDRXSYNC(sc, x, ops) \
363 do { \
364 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
365 WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
366 } while (/*CONSTCOND*/0)
367
368 #define WM_INIT_RXDESC(sc, x) \
369 do { \
370 struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
371 wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
372 struct mbuf *__m = __rxs->rxs_mbuf; \
373 \
374 /* \
375 * Note: We scoot the packet forward 2 bytes in the buffer \
376 * so that the payload after the Ethernet header is aligned \
377 * to a 4-byte boundary. \
378 * \
379 * XXX BRAINDAMAGE ALERT! \
380 * The stupid chip uses the same size for every buffer, which \
381 * is set in the Receive Control register. We are using the 2K \
382 * size option, but what we REALLY want is (2K - 2)! For this \
383 * reason, we can't "scoot" packets longer than the standard \
384 * Ethernet MTU. On strict-alignment platforms, if the total \
385 * size exceeds (2K - 2) we set align_tweak to 0 and let \
386 * the upper layer copy the headers. \
387 */ \
388 __m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak; \
389 \
390 __rxd->wrx_addr.wa_low = \
391 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + \
392 (sc)->sc_align_tweak); \
393 __rxd->wrx_addr.wa_high = 0; \
394 __rxd->wrx_len = 0; \
395 __rxd->wrx_cksum = 0; \
396 __rxd->wrx_status = 0; \
397 __rxd->wrx_errors = 0; \
398 __rxd->wrx_special = 0; \
399 WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
400 \
401 CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
402 } while (/*CONSTCOND*/0)
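/*
 * Illustrative example of the align tweak: with sc_align_tweak == 2,
 * the 14-byte Ethernet header ends at buffer offset 16, leaving the
 * IP header that follows on a 4-byte boundary.
 */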
403
404 static void wm_start(struct ifnet *);
405 static void wm_watchdog(struct ifnet *);
406 static int wm_ioctl(struct ifnet *, u_long, caddr_t);
407 static int wm_init(struct ifnet *);
408 static void wm_stop(struct ifnet *, int);
409
410 static void wm_shutdown(void *);
411
412 static void wm_reset(struct wm_softc *);
413 static void wm_rxdrain(struct wm_softc *);
414 static int wm_add_rxbuf(struct wm_softc *, int);
415 static int wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
416 static void wm_tick(void *);
417
418 static void wm_set_filter(struct wm_softc *);
419
420 static int wm_intr(void *);
421 static void wm_txintr(struct wm_softc *);
422 static void wm_rxintr(struct wm_softc *);
423 static void wm_linkintr(struct wm_softc *, uint32_t);
424
425 static void wm_tbi_mediainit(struct wm_softc *);
426 static int wm_tbi_mediachange(struct ifnet *);
427 static void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
428
429 static void wm_tbi_set_linkled(struct wm_softc *);
430 static void wm_tbi_check_link(struct wm_softc *);
431
432 static void wm_gmii_reset(struct wm_softc *);
433
434 static int wm_gmii_i82543_readreg(struct device *, int, int);
435 static void wm_gmii_i82543_writereg(struct device *, int, int, int);
436
437 static int wm_gmii_i82544_readreg(struct device *, int, int);
438 static void wm_gmii_i82544_writereg(struct device *, int, int, int);
439
440 static void wm_gmii_statchg(struct device *);
441
442 static void wm_gmii_mediainit(struct wm_softc *);
443 static int wm_gmii_mediachange(struct ifnet *);
444 static void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
445
446 static int wm_match(struct device *, struct cfdata *, void *);
447 static void wm_attach(struct device *, struct device *, void *);
448
449 CFATTACH_DECL(wm, sizeof(struct wm_softc),
450 wm_match, wm_attach, NULL, NULL);
451
452 /*
453 * Devices supported by this driver.
454 */
455 const struct wm_product {
456 pci_vendor_id_t wmp_vendor;
457 pci_product_id_t wmp_product;
458 const char *wmp_name;
459 wm_chip_type wmp_type;
460 int wmp_flags;
461 #define WMP_F_1000X 0x01
462 #define WMP_F_1000T 0x02
463 } wm_products[] = {
464 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
465 "Intel i82542 1000BASE-X Ethernet",
466 WM_T_82542_2_1, WMP_F_1000X },
467
468 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
469 "Intel i82543GC 1000BASE-X Ethernet",
470 WM_T_82543, WMP_F_1000X },
471
472 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
473 "Intel i82543GC 1000BASE-T Ethernet",
474 WM_T_82543, WMP_F_1000T },
475
476 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
477 "Intel i82544EI 1000BASE-T Ethernet",
478 WM_T_82544, WMP_F_1000T },
479
480 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
481 "Intel i82544EI 1000BASE-X Ethernet",
482 WM_T_82544, WMP_F_1000X },
483
484 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
485 "Intel i82544GC 1000BASE-T Ethernet",
486 WM_T_82544, WMP_F_1000T },
487
488 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
489 "Intel i82544GC (LOM) 1000BASE-T Ethernet",
490 WM_T_82544, WMP_F_1000T },
491
492 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
493 "Intel i82540EM 1000BASE-T Ethernet",
494 WM_T_82540, WMP_F_1000T },
495
496 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LOM,
497 "Intel i82540EP 1000BASE-T Ethernet",
498 WM_T_82540, WMP_F_1000T },
499
500 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP,
501 "Intel i82540EP 1000BASE-T Ethernet",
502 WM_T_82540, WMP_F_1000T },
503
504 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EP_LP,
505 "Intel i82540EP 1000BASE-T Ethernet",
506 WM_T_82540, WMP_F_1000T },
507
508 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
509 "Intel i82545EM 1000BASE-T Ethernet",
510 WM_T_82545, WMP_F_1000T },
511
512 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
513 "Intel i82546EB 1000BASE-T Ethernet",
514 WM_T_82546, WMP_F_1000T },
515
516 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_QUAD,
517 "Intel i82546EB 1000BASE-T Ethernet",
518 WM_T_82546, WMP_F_1000T },
519
520 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
521 "Intel i82545EM 1000BASE-X Ethernet",
522 WM_T_82545, WMP_F_1000X },
523
524 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
525 "Intel i82546EB 1000BASE-X Ethernet",
526 WM_T_82546, WMP_F_1000X },
527
528 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
529 "Intel i82540EM (LOM) 1000BASE-T Ethernet",
530 WM_T_82540, WMP_F_1000T },
531
532 { 0, 0,
533 NULL,
534 0, 0 },
535 };
536
537 #ifdef WM_EVENT_COUNTERS
538 #if WM_NTXSEGS != 16
539 #error Update wm_txseg_evcnt_names
540 #endif
541 static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
542 "txseg1",
543 "txseg2",
544 "txseg3",
545 "txseg4",
546 "txseg5",
547 "txseg6",
548 "txseg7",
549 "txseg8",
550 "txseg9",
551 "txseg10",
552 "txseg11",
553 "txseg12",
554 "txseg13",
555 "txseg14",
556 "txseg15",
557 "txseg16",
558 };
559 #endif /* WM_EVENT_COUNTERS */
560
561 #if 0 /* Not currently used */
562 static __inline uint32_t
563 wm_io_read(struct wm_softc *sc, int reg)
564 {
565
566 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
567 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
568 }
569 #endif
570
571 static __inline void
572 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
573 {
574
575 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
576 bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
577 }
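/*
 * The pair of writes above uses the I/O BAR as an indirect window:
 * offset 0 selects the target register, offset 4 carries the data.
 * Hypothetical use (illustrative only, not compiled):
 */
#if 0
	wm_io_write(sc, WMREG_CTRL, sc->sc_ctrl);	/* CTRL via I/O space */
#endif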
578
579 static const struct wm_product *
580 wm_lookup(const struct pci_attach_args *pa)
581 {
582 const struct wm_product *wmp;
583
584 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
585 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
586 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
587 return (wmp);
588 }
589 return (NULL);
590 }
591
592 static int
593 wm_match(struct device *parent, struct cfdata *cf, void *aux)
594 {
595 struct pci_attach_args *pa = aux;
596
597 if (wm_lookup(pa) != NULL)
598 return (1);
599
600 return (0);
601 }
602
603 static void
604 wm_attach(struct device *parent, struct device *self, void *aux)
605 {
606 struct wm_softc *sc = (void *) self;
607 struct pci_attach_args *pa = aux;
608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
609 pci_chipset_tag_t pc = pa->pa_pc;
610 pci_intr_handle_t ih;
611 const char *intrstr = NULL;
612 const char *eetype;
613 bus_space_tag_t memt;
614 bus_space_handle_t memh;
615 bus_dma_segment_t seg;
616 int memh_valid;
617 int i, rseg, error;
618 const struct wm_product *wmp;
619 uint8_t enaddr[ETHER_ADDR_LEN];
620 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
621 pcireg_t preg, memtype;
622 uint32_t reg;
623 int pmreg;
624
625 callout_init(&sc->sc_tick_ch);
626
627 wmp = wm_lookup(pa);
628 if (wmp == NULL) {
629 printf("\n");
630 panic("wm_attach: impossible");
631 }
632
633 sc->sc_dmat = pa->pa_dmat;
634
635 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
636 aprint_naive(": Ethernet controller\n");
637 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
638
639 sc->sc_type = wmp->wmp_type;
640 if (sc->sc_type < WM_T_82543) {
641 if (preg < 2) {
642 aprint_error("%s: i82542 must be at least rev. 2\n",
643 sc->sc_dev.dv_xname);
644 return;
645 }
646 if (preg < 3)
647 sc->sc_type = WM_T_82542_2_0;
648 }
649
650 /*
651 	 * Map the device.  All devices support memory-mapped access,
652 * and it is really required for normal operation.
653 */
654 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
655 switch (memtype) {
656 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
657 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
658 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
659 memtype, 0, &memt, &memh, NULL, NULL) == 0);
660 break;
661 default:
662 memh_valid = 0;
663 }
664
665 if (memh_valid) {
666 sc->sc_st = memt;
667 sc->sc_sh = memh;
668 } else {
669 aprint_error("%s: unable to map device registers\n",
670 sc->sc_dev.dv_xname);
671 return;
672 }
673
674 /*
675 * In addition, i82544 and later support I/O mapped indirect
676 * register access. It is not desirable (nor supported in
677 * this driver) to use it for normal operation, though it is
678 * required to work around bugs in some chip versions.
679 */
680 if (sc->sc_type >= WM_T_82544) {
681 /* First we have to find the I/O BAR. */
682 for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
683 if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
684 PCI_MAPREG_TYPE_IO)
685 break;
686 }
687 if (i == PCI_MAPREG_END)
688 aprint_error("%s: WARNING: unable to find I/O BAR\n",
689 sc->sc_dev.dv_xname);
690 else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
691 0, &sc->sc_iot, &sc->sc_ioh,
692 NULL, NULL) == 0)
693 sc->sc_flags |= WM_F_IOH_VALID;
694 else
695 aprint_error("%s: WARNING: unable to map I/O space\n",
696 sc->sc_dev.dv_xname);
697 }
698
699 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
700 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
701 preg |= PCI_COMMAND_MASTER_ENABLE;
702 if (sc->sc_type < WM_T_82542_2_1)
703 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
704 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
705
706 /* Get it out of power save mode, if needed. */
707 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
708 preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
709 PCI_PMCSR_STATE_MASK;
710 if (preg == PCI_PMCSR_STATE_D3) {
711 /*
712 * The card has lost all configuration data in
713 * this state, so punt.
714 */
715 aprint_error("%s: unable to wake from power state D3\n",
716 sc->sc_dev.dv_xname);
717 return;
718 }
719 if (preg != PCI_PMCSR_STATE_D0) {
720 aprint_normal("%s: waking up from power state D%d\n",
721 sc->sc_dev.dv_xname, preg);
722 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
723 PCI_PMCSR_STATE_D0);
724 }
725 }
726
727 /*
728 * Map and establish our interrupt.
729 */
730 if (pci_intr_map(pa, &ih)) {
731 aprint_error("%s: unable to map interrupt\n",
732 sc->sc_dev.dv_xname);
733 return;
734 }
735 intrstr = pci_intr_string(pc, ih);
736 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
737 if (sc->sc_ih == NULL) {
738 aprint_error("%s: unable to establish interrupt",
739 sc->sc_dev.dv_xname);
740 if (intrstr != NULL)
741 aprint_normal(" at %s", intrstr);
742 aprint_normal("\n");
743 return;
744 }
745 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
746
747 /*
748 * Determine a few things about the bus we're connected to.
749 */
750 if (sc->sc_type < WM_T_82543) {
751 /* We don't really know the bus characteristics here. */
752 sc->sc_bus_speed = 33;
753 } else {
754 reg = CSR_READ(sc, WMREG_STATUS);
755 if (reg & STATUS_BUS64)
756 sc->sc_flags |= WM_F_BUS64;
757 if (sc->sc_type >= WM_T_82544 &&
758 (reg & STATUS_PCIX_MODE) != 0) {
759 pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
760
761 sc->sc_flags |= WM_F_PCIX;
762 if (pci_get_capability(pa->pa_pc, pa->pa_tag,
763 PCI_CAP_PCIX,
764 &sc->sc_pcix_offset, NULL) == 0)
765 aprint_error("%s: unable to find PCIX "
766 "capability\n", sc->sc_dev.dv_xname);
767 else if (sc->sc_type != WM_T_82545_3 &&
768 sc->sc_type != WM_T_82546_3) {
769 /*
770 * Work around a problem caused by the BIOS
771 * setting the max memory read byte count
772 * incorrectly.
773 */
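				/*
				 * MMRBC is encoded as a power of two:
				 * 512 << n for n in 0..3, i.e. 512, 1024,
				 * 2048 or 4096 bytes.  If the command
				 * register asks for more than the status
				 * register advertises, clamp it below.
				 */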
774 pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
775 sc->sc_pcix_offset + PCI_PCIX_CMD);
776 pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
777 sc->sc_pcix_offset + PCI_PCIX_STATUS);
778
779 bytecnt =
780 (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
781 PCI_PCIX_CMD_BYTECNT_SHIFT;
782 maxb =
783 (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
784 PCI_PCIX_STATUS_MAXB_SHIFT;
785 if (bytecnt > maxb) {
786 aprint_verbose("%s: resetting PCI-X "
787 "MMRBC: %d -> %d\n",
788 sc->sc_dev.dv_xname,
789 512 << bytecnt, 512 << maxb);
790 pcix_cmd = (pcix_cmd &
791 ~PCI_PCIX_CMD_BYTECNT_MASK) |
792 (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
793 pci_conf_write(pa->pa_pc, pa->pa_tag,
794 sc->sc_pcix_offset + PCI_PCIX_CMD,
795 pcix_cmd);
796 }
797 }
798 }
799 /*
800 * The quad port adapter is special; it has a PCIX-PCIX
801 * bridge on the board, and can run the secondary bus at
802 * a higher speed.
803 */
804 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
805 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
806 : 66;
807 } else if (sc->sc_flags & WM_F_PCIX) {
808 switch (STATUS_PCIXSPD(reg)) {
809 case STATUS_PCIXSPD_50_66:
810 sc->sc_bus_speed = 66;
811 break;
812 case STATUS_PCIXSPD_66_100:
813 sc->sc_bus_speed = 100;
814 break;
815 case STATUS_PCIXSPD_100_133:
816 sc->sc_bus_speed = 133;
817 break;
818 default:
819 aprint_error(
820 "%s: unknown PCIXSPD %d; assuming 66MHz\n",
821 sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
822 sc->sc_bus_speed = 66;
823 }
824 } else
825 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
826 aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
827 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
828 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
829 }
830
831 /*
832 * Allocate the control data structures, and create and load the
833 * DMA map for it.
834 */
835 if ((error = bus_dmamem_alloc(sc->sc_dmat,
836 sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
837 0)) != 0) {
838 aprint_error(
839 "%s: unable to allocate control data, error = %d\n",
840 sc->sc_dev.dv_xname, error);
841 goto fail_0;
842 }
843
844 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
845 sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
846 0)) != 0) {
847 aprint_error("%s: unable to map control data, error = %d\n",
848 sc->sc_dev.dv_xname, error);
849 goto fail_1;
850 }
851
852 if ((error = bus_dmamap_create(sc->sc_dmat,
853 sizeof(struct wm_control_data), 1,
854 sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
855 aprint_error("%s: unable to create control data DMA map, "
856 "error = %d\n", sc->sc_dev.dv_xname, error);
857 goto fail_2;
858 }
859
860 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
861 sc->sc_control_data, sizeof(struct wm_control_data), NULL,
862 0)) != 0) {
863 aprint_error(
864 "%s: unable to load control data DMA map, error = %d\n",
865 sc->sc_dev.dv_xname, error);
866 goto fail_3;
867 }
868
869 /*
870 * Create the transmit buffer DMA maps.
871 */
872 for (i = 0; i < WM_TXQUEUELEN; i++) {
873 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
874 WM_NTXSEGS, MCLBYTES, 0, 0,
875 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
876 aprint_error("%s: unable to create Tx DMA map %d, "
877 "error = %d\n", sc->sc_dev.dv_xname, i, error);
878 goto fail_4;
879 }
880 }
881
882 /*
883 * Create the receive buffer DMA maps.
884 */
885 for (i = 0; i < WM_NRXDESC; i++) {
886 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
887 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
888 aprint_error("%s: unable to create Rx DMA map %d, "
889 "error = %d\n", sc->sc_dev.dv_xname, i, error);
890 goto fail_5;
891 }
892 sc->sc_rxsoft[i].rxs_mbuf = NULL;
893 }
894
895 /*
896 * Reset the chip to a known state.
897 */
898 wm_reset(sc);
899
900 /*
901 * Get some information about the EEPROM.
902 */
903 eetype = "MicroWire";
904 if (sc->sc_type >= WM_T_82540)
905 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
906 if (sc->sc_type <= WM_T_82544)
907 sc->sc_ee_addrbits = 6;
908 else if (sc->sc_type <= WM_T_82546_3) {
909 reg = CSR_READ(sc, WMREG_EECD);
910 if (reg & EECD_EE_SIZE)
911 sc->sc_ee_addrbits = 8;
912 else
913 sc->sc_ee_addrbits = 6;
914 }
915 aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
916 sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
917 sc->sc_ee_addrbits, eetype);
918
919 /*
920 * Read the Ethernet address from the EEPROM.
921 */
922 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
923 sizeof(myea) / sizeof(myea[0]), myea)) {
924 aprint_error("%s: unable to read Ethernet address\n",
925 sc->sc_dev.dv_xname);
926 return;
927 }
928 enaddr[0] = myea[0] & 0xff;
929 enaddr[1] = myea[0] >> 8;
930 enaddr[2] = myea[1] & 0xff;
931 enaddr[3] = myea[1] >> 8;
932 enaddr[4] = myea[2] & 0xff;
933 enaddr[5] = myea[2] >> 8;
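	/*
	 * Illustrative example: the EEPROM stores the MAC address as three
	 * little-endian 16-bit words, so myea[] = { 0x1200, 0x5634, 0x9a78 }
	 * unpacks to 00:12:34:56:78:9a.
	 */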
934
935 /*
936 * Toggle the LSB of the MAC address on the second port
937 * of the i82546.
938 */
939 if (sc->sc_type == WM_T_82546) {
940 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
941 enaddr[5] ^= 1;
942 }
943
944 aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
945 ether_sprintf(enaddr));
946
947 /*
948 * Read the config info from the EEPROM, and set up various
949 * bits in the control registers based on their contents.
950 */
951 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
952 aprint_error("%s: unable to read CFG1 from EEPROM\n",
953 sc->sc_dev.dv_xname);
954 return;
955 }
956 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
957 aprint_error("%s: unable to read CFG2 from EEPROM\n",
958 sc->sc_dev.dv_xname);
959 return;
960 }
961 if (sc->sc_type >= WM_T_82544) {
962 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
963 aprint_error("%s: unable to read SWDPIN from EEPROM\n",
964 sc->sc_dev.dv_xname);
965 return;
966 }
967 }
968
969 if (cfg1 & EEPROM_CFG1_ILOS)
970 sc->sc_ctrl |= CTRL_ILOS;
971 if (sc->sc_type >= WM_T_82544) {
972 sc->sc_ctrl |=
973 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
974 CTRL_SWDPIO_SHIFT;
975 sc->sc_ctrl |=
976 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
977 CTRL_SWDPINS_SHIFT;
978 } else {
979 sc->sc_ctrl |=
980 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
981 CTRL_SWDPIO_SHIFT;
982 }
983
984 #if 0
985 if (sc->sc_type >= WM_T_82544) {
986 if (cfg1 & EEPROM_CFG1_IPS0)
987 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
988 if (cfg1 & EEPROM_CFG1_IPS1)
989 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
990 sc->sc_ctrl_ext |=
991 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
992 CTRL_EXT_SWDPIO_SHIFT;
993 sc->sc_ctrl_ext |=
994 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
995 CTRL_EXT_SWDPINS_SHIFT;
996 } else {
997 sc->sc_ctrl_ext |=
998 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
999 CTRL_EXT_SWDPIO_SHIFT;
1000 }
1001 #endif
1002
1003 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1004 #if 0
1005 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1006 #endif
1007
1008 /*
1009 * Set up some register offsets that are different between
1010 * the i82542 and the i82543 and later chips.
1011 */
1012 if (sc->sc_type < WM_T_82543) {
1013 sc->sc_rdt_reg = WMREG_OLD_RDT0;
1014 sc->sc_tdt_reg = WMREG_OLD_TDT;
1015 } else {
1016 sc->sc_rdt_reg = WMREG_RDT;
1017 sc->sc_tdt_reg = WMREG_TDT;
1018 }
1019
1020 /*
1021 * Determine if we should use flow control. We should
1022 	 * always use it, unless we're on an i82542 < 2.1.
1023 */
1024 if (sc->sc_type >= WM_T_82542_2_1)
1025 sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
1026
1027 /*
1028 * Determine if we're TBI or GMII mode, and initialize the
1029 * media structures accordingly.
1030 */
1031 if (sc->sc_type < WM_T_82543 ||
1032 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1033 if (wmp->wmp_flags & WMP_F_1000T)
1034 aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
1035 "product!\n", sc->sc_dev.dv_xname);
1036 wm_tbi_mediainit(sc);
1037 } else {
1038 if (wmp->wmp_flags & WMP_F_1000X)
1039 aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
1040 "product!\n", sc->sc_dev.dv_xname);
1041 wm_gmii_mediainit(sc);
1042 }
1043
1044 ifp = &sc->sc_ethercom.ec_if;
1045 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
1046 ifp->if_softc = sc;
1047 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1048 ifp->if_ioctl = wm_ioctl;
1049 ifp->if_start = wm_start;
1050 ifp->if_watchdog = wm_watchdog;
1051 ifp->if_init = wm_init;
1052 ifp->if_stop = wm_stop;
1053 IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
1054 IFQ_SET_READY(&ifp->if_snd);
1055
1056 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1057
1058 /*
1059 	 * If we're an i82543 or greater, we can support VLANs.
1060 */
1061 if (sc->sc_type >= WM_T_82543)
1062 sc->sc_ethercom.ec_capabilities |=
1063 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
1064
1065 /*
1066 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1067 * on i82543 and later.
1068 */
1069 if (sc->sc_type >= WM_T_82543)
1070 ifp->if_capabilities |=
1071 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1072
1073 /*
1074 * Attach the interface.
1075 */
1076 if_attach(ifp);
1077 ether_ifattach(ifp, enaddr);
1078 #if NRND > 0
1079 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
1080 RND_TYPE_NET, 0);
1081 #endif
1082
1083 #ifdef WM_EVENT_COUNTERS
1084 /* Attach event counters. */
1085 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
1086 NULL, sc->sc_dev.dv_xname, "txsstall");
1087 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1088 NULL, sc->sc_dev.dv_xname, "txdstall");
1089 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
1090 NULL, sc->sc_dev.dv_xname, "txforceintr");
1091 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1092 NULL, sc->sc_dev.dv_xname, "txdw");
1093 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1094 NULL, sc->sc_dev.dv_xname, "txqe");
1095 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1096 NULL, sc->sc_dev.dv_xname, "rxintr");
1097 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1098 NULL, sc->sc_dev.dv_xname, "linkintr");
1099
1100 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1101 NULL, sc->sc_dev.dv_xname, "rxipsum");
1102 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1103 NULL, sc->sc_dev.dv_xname, "rxtusum");
1104 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1105 NULL, sc->sc_dev.dv_xname, "txipsum");
1106 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1107 NULL, sc->sc_dev.dv_xname, "txtusum");
1108
1109 evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
1110 NULL, sc->sc_dev.dv_xname, "txctx init");
1111 evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
1112 NULL, sc->sc_dev.dv_xname, "txctx hit");
1113 evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
1114 NULL, sc->sc_dev.dv_xname, "txctx miss");
1115
1116 for (i = 0; i < WM_NTXSEGS; i++)
1117 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1118 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1119
1120 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1121 NULL, sc->sc_dev.dv_xname, "txdrop");
1122
1123 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1124 NULL, sc->sc_dev.dv_xname, "tu");
1125 #endif /* WM_EVENT_COUNTERS */
1126
1127 /*
1128 * Make sure the interface is shutdown during reboot.
1129 */
1130 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1131 if (sc->sc_sdhook == NULL)
1132 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1133 sc->sc_dev.dv_xname);
1134 return;
1135
1136 /*
1137 * Free any resources we've allocated during the failed attach
1138 * attempt. Do this in reverse order and fall through.
1139 */
1140 fail_5:
1141 for (i = 0; i < WM_NRXDESC; i++) {
1142 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1143 bus_dmamap_destroy(sc->sc_dmat,
1144 sc->sc_rxsoft[i].rxs_dmamap);
1145 }
1146 fail_4:
1147 for (i = 0; i < WM_TXQUEUELEN; i++) {
1148 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1149 bus_dmamap_destroy(sc->sc_dmat,
1150 sc->sc_txsoft[i].txs_dmamap);
1151 }
1152 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1153 fail_3:
1154 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1155 fail_2:
1156 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1157 sizeof(struct wm_control_data));
1158 fail_1:
1159 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1160 fail_0:
1161 return;
1162 }
1163
1164 /*
1165 * wm_shutdown:
1166 *
1167 * Make sure the interface is stopped at reboot time.
1168 */
1169 static void
1170 wm_shutdown(void *arg)
1171 {
1172 struct wm_softc *sc = arg;
1173
1174 wm_stop(&sc->sc_ethercom.ec_if, 1);
1175 }
1176
1177 /*
1178 * wm_tx_cksum:
1179 *
1180 * Set up TCP/IP checksumming parameters for the
1181 * specified packet.
1182 */
1183 static int
1184 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1185 uint32_t *fieldsp)
1186 {
1187 struct mbuf *m0 = txs->txs_mbuf;
1188 struct livengood_tcpip_ctxdesc *t;
1189 uint32_t fields = 0, ipcs, tucs;
1190 struct ip *ip;
1191 struct ether_header *eh;
1192 int offset, iphl;
1193
1194 /*
1195 * XXX It would be nice if the mbuf pkthdr had offset
1196 * fields for the protocol headers.
1197 */
1198
1199 eh = mtod(m0, struct ether_header *);
1200 switch (htons(eh->ether_type)) {
1201 case ETHERTYPE_IP:
1202 iphl = sizeof(struct ip);
1203 offset = ETHER_HDR_LEN;
1204 break;
1205
1206 case ETHERTYPE_VLAN:
1207 iphl = sizeof(struct ip);
1208 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1209 break;
1210
1211 default:
1212 /*
1213 * Don't support this protocol or encapsulation.
1214 */
1215 *fieldsp = 0;
1216 *cmdp = 0;
1217 return (0);
1218 }
1219
1220 if (m0->m_len < (offset + iphl)) {
1221 if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
1222 printf("%s: wm_tx_cksum: mbuf allocation failed, "
1223 "packet dropped\n", sc->sc_dev.dv_xname);
1224 return (ENOMEM);
1225 }
1226 m0 = txs->txs_mbuf;
1227 }
1228
1229 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
1230 iphl = ip->ip_hl << 2;
1231
1232 /*
1233 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1234 * offload feature, if we load the context descriptor, we
1235 * MUST provide valid values for IPCSS and TUCSS fields.
1236 */
1237
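	/*
	 * Worked example (illustrative): for an untagged IPv4 packet,
	 * offset == ETHER_HDR_LEN == 14, so IPCSS = 14, IPCSO = 14 +
	 * offsetof(struct ip, ip_sum) == 24, and with a 20-byte header
	 * IPCSE = 14 + 20 - 1 = 33.
	 */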
1238 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1239 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1240 fields |= htole32(WTX_IXSM);
1241 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1242 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1243 WTX_TCPIP_IPCSE(offset + iphl - 1));
1244 } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1245 /* Use the cached value. */
1246 ipcs = sc->sc_txctx_ipcs;
1247 } else {
1248 /* Just initialize it to the likely value anyway. */
1249 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1250 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1251 WTX_TCPIP_IPCSE(offset + iphl - 1));
1252 }
1253
1254 offset += iphl;
1255
1256 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1257 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1258 fields |= htole32(WTX_TXSM);
1259 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1260 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1261 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1262 } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1263 /* Use the cached value. */
1264 tucs = sc->sc_txctx_tucs;
1265 } else {
1266 /* Just initialize it to a valid TCP context. */
1267 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1268 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1269 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1270 }
1271
1272 if (sc->sc_txctx_ipcs == ipcs &&
1273 sc->sc_txctx_tucs == tucs) {
1274 /* Cached context is fine. */
1275 WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1276 } else {
1277 /* Fill in the context descriptor. */
1278 #ifdef WM_EVENT_COUNTERS
1279 if (sc->sc_txctx_ipcs == 0xffffffff &&
1280 sc->sc_txctx_tucs == 0xffffffff)
1281 WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1282 else
1283 WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1284 #endif
1285 t = (struct livengood_tcpip_ctxdesc *)
1286 &sc->sc_txdescs[sc->sc_txnext];
1287 t->tcpip_ipcs = ipcs;
1288 t->tcpip_tucs = tucs;
1289 t->tcpip_cmdlen =
1290 htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1291 t->tcpip_seg = 0;
1292 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1293
1294 sc->sc_txctx_ipcs = ipcs;
1295 sc->sc_txctx_tucs = tucs;
1296
1297 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1298 txs->txs_ndesc++;
1299 }
1300
1301 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1302 *fieldsp = fields;
1303
1304 return (0);
1305 }
1306
1307 /*
1308 * wm_start: [ifnet interface function]
1309 *
1310 * Start packet transmission on the interface.
1311 */
1312 static void
1313 wm_start(struct ifnet *ifp)
1314 {
1315 struct wm_softc *sc = ifp->if_softc;
1316 struct mbuf *m0;
1317 #if 0 /* XXXJRT */
1318 struct m_tag *mtag;
1319 #endif
1320 struct wm_txsoft *txs;
1321 bus_dmamap_t dmamap;
1322 int error, nexttx, lasttx, ofree, seg;
1323 uint32_t cksumcmd, cksumfields;
1324
1325 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1326 return;
1327
1328 /*
1329 * Remember the previous number of free descriptors.
1330 */
1331 ofree = sc->sc_txfree;
1332
1333 /*
1334 * Loop through the send queue, setting up transmit descriptors
1335 * until we drain the queue, or use up all available transmit
1336 * descriptors.
1337 */
1338 for (;;) {
1339 /* Grab a packet off the queue. */
1340 IFQ_POLL(&ifp->if_snd, m0);
1341 if (m0 == NULL)
1342 break;
1343
1344 DPRINTF(WM_DEBUG_TX,
1345 ("%s: TX: have packet to transmit: %p\n",
1346 sc->sc_dev.dv_xname, m0));
1347
1348 /* Get a work queue entry. */
1349 if (sc->sc_txsfree < WM_TXQUEUE_GC) {
1350 wm_txintr(sc);
1351 if (sc->sc_txsfree == 0) {
1352 DPRINTF(WM_DEBUG_TX,
1353 ("%s: TX: no free job descriptors\n",
1354 sc->sc_dev.dv_xname));
1355 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1356 break;
1357 }
1358 }
1359
1360 txs = &sc->sc_txsoft[sc->sc_txsnext];
1361 dmamap = txs->txs_dmamap;
1362
1363 /*
1364 * Load the DMA map. If this fails, the packet either
1365 * didn't fit in the allotted number of segments, or we
1366 * were short on resources. For the too-many-segments
1367 * case, we simply report an error and drop the packet,
1368 * since we can't sanely copy a jumbo packet to a single
1369 * buffer.
1370 */
1371 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1372 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1373 if (error) {
1374 if (error == EFBIG) {
1375 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1376 printf("%s: Tx packet consumes too many "
1377 "DMA segments, dropping...\n",
1378 sc->sc_dev.dv_xname);
1379 IFQ_DEQUEUE(&ifp->if_snd, m0);
1380 m_freem(m0);
1381 continue;
1382 }
1383 /*
1384 * Short on resources, just stop for now.
1385 */
1386 DPRINTF(WM_DEBUG_TX,
1387 ("%s: TX: dmamap load failed: %d\n",
1388 sc->sc_dev.dv_xname, error));
1389 break;
1390 }
1391
1392 /*
1393 * Ensure we have enough descriptors free to describe
1394 * the packet. Note, we always reserve one descriptor
1395 * at the end of the ring due to the semantics of the
1396 * TDT register, plus one more in the event we need
1397 * to re-load checksum offload context.
1398 */
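		/*
		 * Example (illustrative): with sc_txfree == 10, a packet
		 * may use at most 8 data descriptors; one slot is held
		 * back for the TDT semantics and one for a possible
		 * checksum context descriptor.
		 */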
1399 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1400 /*
1401 * Not enough free descriptors to transmit this
1402 * packet. We haven't committed anything yet,
1403 * so just unload the DMA map, put the packet
1404 			 * back on the queue, and punt.  Notify the upper
1405 * layer that there are no more slots left.
1406 */
1407 DPRINTF(WM_DEBUG_TX,
1408 ("%s: TX: need %d descriptors, have %d\n",
1409 sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1410 sc->sc_txfree - 1));
1411 ifp->if_flags |= IFF_OACTIVE;
1412 bus_dmamap_unload(sc->sc_dmat, dmamap);
1413 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1414 break;
1415 }
1416
1417 IFQ_DEQUEUE(&ifp->if_snd, m0);
1418
1419 /*
1420 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1421 */
1422
1423 /* Sync the DMA map. */
1424 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1425 BUS_DMASYNC_PREWRITE);
1426
1427 DPRINTF(WM_DEBUG_TX,
1428 ("%s: TX: packet has %d DMA segments\n",
1429 sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1430
1431 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1432
1433 /*
1434 * Store a pointer to the packet so that we can free it
1435 * later.
1436 *
1437 		 * Initially, we consider the number of descriptors the
1438 		 * packet uses to be the number of DMA segments.  This may be
1439 * incremented by 1 if we do checksum offload (a descriptor
1440 * is used to set the checksum context).
1441 */
1442 txs->txs_mbuf = m0;
1443 txs->txs_firstdesc = sc->sc_txnext;
1444 txs->txs_ndesc = dmamap->dm_nsegs;
1445
1446 /*
1447 * Set up checksum offload parameters for
1448 * this packet.
1449 */
1450 if (m0->m_pkthdr.csum_flags &
1451 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1452 if (wm_tx_cksum(sc, txs, &cksumcmd,
1453 &cksumfields) != 0) {
1454 /* Error message already displayed. */
1455 bus_dmamap_unload(sc->sc_dmat, dmamap);
1456 continue;
1457 }
1458 } else {
1459 cksumcmd = 0;
1460 cksumfields = 0;
1461 }
1462
1463 cksumcmd |= htole32(WTX_CMD_IDE);
1464
1465 /*
1466 * Initialize the transmit descriptor.
1467 */
1468 for (nexttx = sc->sc_txnext, seg = 0;
1469 seg < dmamap->dm_nsegs;
1470 seg++, nexttx = WM_NEXTTX(nexttx)) {
1471 /*
1472 * Note: we currently only use 32-bit DMA
1473 * addresses.
1474 */
1475 sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
1476 sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1477 htole32(dmamap->dm_segs[seg].ds_addr);
1478 sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1479 htole32(dmamap->dm_segs[seg].ds_len);
1480 sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1481 cksumfields;
1482 lasttx = nexttx;
1483
1484 DPRINTF(WM_DEBUG_TX,
1485 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1486 sc->sc_dev.dv_xname, nexttx,
1487 (uint32_t) dmamap->dm_segs[seg].ds_addr,
1488 (uint32_t) dmamap->dm_segs[seg].ds_len));
1489 }
1490
1491 /*
1492 * Set up the command byte on the last descriptor of
1493 * the packet. If we're in the interrupt delay window,
1494 * delay the interrupt.
1495 */
1496 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1497 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1498
1499 #if 0 /* XXXJRT */
1500 /*
1501 * If VLANs are enabled and the packet has a VLAN tag, set
1502 * up the descriptor to encapsulate the packet for us.
1503 *
1504 * This is only valid on the last descriptor of the packet.
1505 */
1506 if (sc->sc_ethercom.ec_nvlans != 0 &&
1507 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1508 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1509 htole32(WTX_CMD_VLE);
1510 sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1511 = htole16(*(u_int *)(mtag + 1) & 0xffff);
1512 }
1513 #endif /* XXXJRT */
1514
1515 txs->txs_lastdesc = lasttx;
1516
1517 DPRINTF(WM_DEBUG_TX,
1518 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1519 lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1520
1521 /* Sync the descriptors we're using. */
1522 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1523 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1524
1525 /* Give the packet to the chip. */
1526 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1527
1528 DPRINTF(WM_DEBUG_TX,
1529 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1530
1531 DPRINTF(WM_DEBUG_TX,
1532 ("%s: TX: finished transmitting packet, job %d\n",
1533 sc->sc_dev.dv_xname, sc->sc_txsnext));
1534
1535 /* Advance the tx pointer. */
1536 sc->sc_txfree -= txs->txs_ndesc;
1537 sc->sc_txnext = nexttx;
1538
1539 sc->sc_txsfree--;
1540 sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1541
1542 #if NBPFILTER > 0
1543 /* Pass the packet to any BPF listeners. */
1544 if (ifp->if_bpf)
1545 bpf_mtap(ifp->if_bpf, m0);
1546 #endif /* NBPFILTER > 0 */
1547 }
1548
1549 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1550 /* No more slots; notify upper layer. */
1551 ifp->if_flags |= IFF_OACTIVE;
1552 }
1553
1554 if (sc->sc_txfree != ofree) {
1555 /* Set a watchdog timer in case the chip flakes out. */
1556 ifp->if_timer = 5;
1557 }
1558 }
1559
1560 /*
1561 * wm_watchdog: [ifnet interface function]
1562 *
1563 * Watchdog timer handler.
1564 */
1565 static void
1566 wm_watchdog(struct ifnet *ifp)
1567 {
1568 struct wm_softc *sc = ifp->if_softc;
1569
1570 /*
1571 * Since we're using delayed interrupts, sweep up
1572 * before we report an error.
1573 */
1574 wm_txintr(sc);
1575
1576 if (sc->sc_txfree != WM_NTXDESC) {
1577 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1578 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1579 sc->sc_txnext);
1580 ifp->if_oerrors++;
1581
1582 /* Reset the interface. */
1583 (void) wm_init(ifp);
1584 }
1585
1586 /* Try to get more packets going. */
1587 wm_start(ifp);
1588 }
1589
1590 /*
1591 * wm_ioctl: [ifnet interface function]
1592 *
1593 * Handle control requests from the operator.
1594 */
1595 static int
1596 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1597 {
1598 struct wm_softc *sc = ifp->if_softc;
1599 struct ifreq *ifr = (struct ifreq *) data;
1600 int s, error;
1601
1602 s = splnet();
1603
1604 switch (cmd) {
1605 case SIOCSIFMEDIA:
1606 case SIOCGIFMEDIA:
1607 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1608 break;
1609 default:
1610 error = ether_ioctl(ifp, cmd, data);
1611 if (error == ENETRESET) {
1612 /*
1613 * Multicast list has changed; set the hardware filter
1614 * accordingly.
1615 */
1616 wm_set_filter(sc);
1617 error = 0;
1618 }
1619 break;
1620 }
1621
1622 /* Try to get more packets going. */
1623 wm_start(ifp);
1624
1625 splx(s);
1626 return (error);
1627 }
1628
1629 /*
1630 * wm_intr:
1631 *
1632 * Interrupt service routine.
1633 */
1634 static int
1635 wm_intr(void *arg)
1636 {
1637 struct wm_softc *sc = arg;
1638 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1639 uint32_t icr;
1640 int wantinit, handled = 0;
1641
1642 for (wantinit = 0; wantinit == 0;) {
1643 icr = CSR_READ(sc, WMREG_ICR);
1644 if ((icr & sc->sc_icr) == 0)
1645 break;
1646
1647 #if 0 /*NRND > 0*/
1648 if (RND_ENABLED(&sc->rnd_source))
1649 rnd_add_uint32(&sc->rnd_source, icr);
1650 #endif
1651
1652 handled = 1;
1653
1654 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1655 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1656 DPRINTF(WM_DEBUG_RX,
1657 ("%s: RX: got Rx intr 0x%08x\n",
1658 sc->sc_dev.dv_xname,
1659 icr & (ICR_RXDMT0|ICR_RXT0)));
1660 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1661 }
1662 #endif
1663 wm_rxintr(sc);
1664
1665 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1666 if (icr & ICR_TXDW) {
1667 DPRINTF(WM_DEBUG_TX,
1668 ("%s: TX: got TDXW interrupt\n",
1669 sc->sc_dev.dv_xname));
1670 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1671 }
1672 #endif
1673 wm_txintr(sc);
1674
1675 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1676 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1677 wm_linkintr(sc, icr);
1678 }
1679
1680 if (icr & ICR_RXO) {
1681 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1682 wantinit = 1;
1683 }
1684 }
1685
1686 if (handled) {
1687 if (wantinit)
1688 wm_init(ifp);
1689
1690 /* Try to get more packets going. */
1691 wm_start(ifp);
1692 }
1693
1694 return (handled);
1695 }
1696
1697 /*
1698 * wm_txintr:
1699 *
1700 * Helper; handle transmit interrupts.
1701 */
1702 static void
1703 wm_txintr(struct wm_softc *sc)
1704 {
1705 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1706 struct wm_txsoft *txs;
1707 uint8_t status;
1708 int i;
1709
1710 ifp->if_flags &= ~IFF_OACTIVE;
1711
1712 /*
1713 * Go through the Tx list and free mbufs for those
1714 * frames which have been transmitted.
1715 */
1716 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1717 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1718 txs = &sc->sc_txsoft[i];
1719
1720 DPRINTF(WM_DEBUG_TX,
1721 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1722
1723 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1724 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1725
1726 status = le32toh(sc->sc_txdescs[
1727 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1728 if ((status & WTX_ST_DD) == 0) {
1729 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1730 BUS_DMASYNC_PREREAD);
1731 break;
1732 }
1733
1734 DPRINTF(WM_DEBUG_TX,
1735 ("%s: TX: job %d done: descs %d..%d\n",
1736 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1737 txs->txs_lastdesc));
1738
1739 /*
1740 * XXX We should probably be using the statistics
1741 * XXX registers, but I don't know if they exist
1742 * XXX on chips before the i82544.
1743 */
1744
1745 #ifdef WM_EVENT_COUNTERS
1746 if (status & WTX_ST_TU)
1747 WM_EVCNT_INCR(&sc->sc_ev_tu);
1748 #endif /* WM_EVENT_COUNTERS */
1749
1750 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1751 ifp->if_oerrors++;
1752 if (status & WTX_ST_LC)
1753 printf("%s: late collision\n",
1754 sc->sc_dev.dv_xname);
1755 else if (status & WTX_ST_EC) {
1756 ifp->if_collisions += 16;
1757 printf("%s: excessive collisions\n",
1758 sc->sc_dev.dv_xname);
1759 }
1760 } else
1761 ifp->if_opackets++;
1762
1763 sc->sc_txfree += txs->txs_ndesc;
1764 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1765 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1766 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1767 m_freem(txs->txs_mbuf);
1768 txs->txs_mbuf = NULL;
1769 }
1770
1771 /* Update the dirty transmit buffer pointer. */
1772 sc->sc_txsdirty = i;
1773 DPRINTF(WM_DEBUG_TX,
1774 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1775
1776 /*
1777 * If there are no more pending transmissions, cancel the watchdog
1778 * timer.
1779 */
1780 if (sc->sc_txsfree == WM_TXQUEUELEN)
1781 ifp->if_timer = 0;
1782 }
1783
1784 /*
1785 * wm_rxintr:
1786 *
1787 * Helper; handle receive interrupts.
1788 */
1789 static void
1790 wm_rxintr(struct wm_softc *sc)
1791 {
1792 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1793 struct wm_rxsoft *rxs;
1794 struct mbuf *m;
1795 int i, len;
1796 uint8_t status, errors;
1797
1798 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1799 rxs = &sc->sc_rxsoft[i];
1800
1801 DPRINTF(WM_DEBUG_RX,
1802 ("%s: RX: checking descriptor %d\n",
1803 sc->sc_dev.dv_xname, i));
1804
1805 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1806
1807 status = sc->sc_rxdescs[i].wrx_status;
1808 errors = sc->sc_rxdescs[i].wrx_errors;
1809 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1810
1811 if ((status & WRX_ST_DD) == 0) {
1812 /*
1813 * We have processed all of the receive descriptors.
1814 */
1815 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1816 break;
1817 }
1818
1819 if (__predict_false(sc->sc_rxdiscard)) {
1820 DPRINTF(WM_DEBUG_RX,
1821 ("%s: RX: discarding contents of descriptor %d\n",
1822 sc->sc_dev.dv_xname, i));
1823 WM_INIT_RXDESC(sc, i);
1824 if (status & WRX_ST_EOP) {
1825 /* Reset our state. */
1826 DPRINTF(WM_DEBUG_RX,
1827 ("%s: RX: resetting rxdiscard -> 0\n",
1828 sc->sc_dev.dv_xname));
1829 sc->sc_rxdiscard = 0;
1830 }
1831 continue;
1832 }
1833
1834 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1835 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1836
1837 m = rxs->rxs_mbuf;
1838
1839 /*
1840 * Add a new receive buffer to the ring.
1841 */
1842 if (wm_add_rxbuf(sc, i) != 0) {
1843 /*
1844 * Failed, throw away what we've done so
1845 * far, and discard the rest of the packet.
1846 */
1847 ifp->if_ierrors++;
1848 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1849 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1850 WM_INIT_RXDESC(sc, i);
1851 if ((status & WRX_ST_EOP) == 0)
1852 sc->sc_rxdiscard = 1;
1853 if (sc->sc_rxhead != NULL)
1854 m_freem(sc->sc_rxhead);
1855 WM_RXCHAIN_RESET(sc);
1856 DPRINTF(WM_DEBUG_RX,
1857 ("%s: RX: Rx buffer allocation failed, "
1858 "dropping packet%s\n", sc->sc_dev.dv_xname,
1859 sc->sc_rxdiscard ? " (discard)" : ""));
1860 continue;
1861 }
1862
1863 WM_RXCHAIN_LINK(sc, m);
1864
1865 m->m_len = len;
1866
1867 DPRINTF(WM_DEBUG_RX,
1868 ("%s: RX: buffer at %p len %d\n",
1869 sc->sc_dev.dv_xname, m->m_data, len));
1870
1871 /*
1872 * If this is not the end of the packet, keep
1873 * looking.
1874 */
1875 if ((status & WRX_ST_EOP) == 0) {
1876 sc->sc_rxlen += len;
1877 DPRINTF(WM_DEBUG_RX,
1878 ("%s: RX: not yet EOP, rxlen -> %d\n",
1879 sc->sc_dev.dv_xname, sc->sc_rxlen));
1880 continue;
1881 }
1882
1883 /*
1884 * Okay, we have the entire packet now...
1885 */
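		/* Terminate the mbuf chain and add in the earlier
		   fragments' accumulated length. */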
1886 *sc->sc_rxtailp = NULL;
1887 m = sc->sc_rxhead;
1888 len += sc->sc_rxlen;
1889
1890 WM_RXCHAIN_RESET(sc);
1891
1892 DPRINTF(WM_DEBUG_RX,
1893 ("%s: RX: have entire packet, len -> %d\n",
1894 sc->sc_dev.dv_xname, len));
1895
1896 /*
1897 * If an error occurred, update stats and drop the packet.
1898 */
1899 if (errors &
1900 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1901 ifp->if_ierrors++;
1902 if (errors & WRX_ER_SE)
1903 printf("%s: symbol error\n",
1904 sc->sc_dev.dv_xname);
1905 else if (errors & WRX_ER_SEQ)
1906 printf("%s: receive sequence error\n",
1907 sc->sc_dev.dv_xname);
1908 else if (errors & WRX_ER_CE)
1909 printf("%s: CRC error\n",
1910 sc->sc_dev.dv_xname);
1911 m_freem(m);
1912 continue;
1913 }
1914
1915 /*
1916 * No errors. Receive the packet.
1917 *
1918 * Note, we have configured the chip to include the
1919 * CRC with every packet.
1920 */
1921 m->m_flags |= M_HASFCS;
1922 m->m_pkthdr.rcvif = ifp;
1923 m->m_pkthdr.len = len;
1924
1925 #if 0 /* XXXJRT */
1926 /*
1927 * If VLANs are enabled, VLAN packets have been unwrapped
1928 * for us. Associate the tag with the packet.
1929 */
1930 if (sc->sc_ethercom.ec_nvlans != 0 &&
1931 (status & WRX_ST_VP) != 0) {
1932 struct m_tag *vtag;
1933
1934 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
1935 M_NOWAIT);
1936 if (vtag == NULL) {
1937 ifp->if_ierrors++;
1938 printf("%s: unable to allocate VLAN tag\n",
1939 sc->sc_dev.dv_xname);
1940 m_freem(m);
1941 continue;
1942 }
1943
1944 *(u_int *)(vtag + 1) =
1945 le16toh(sc->sc_rxdescs[i].wrx_special);
1946 }
1947 #endif /* XXXJRT */
1948
1949 /*
1950 * Set up checksum info for this packet.
1951 */
1952 if (status & WRX_ST_IPCS) {
1953 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1954 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1955 if (errors & WRX_ER_IPE)
1956 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1957 }
1958 if (status & WRX_ST_TCPCS) {
1959 /*
1960 * Note: we don't know if this was TCP or UDP,
1961 * so we just set both bits, and expect the
1962 * upper layers to deal.
1963 */
1964 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1965 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1966 if (errors & WRX_ER_TCPE)
1967 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1968 }
1969
1970 ifp->if_ipackets++;
1971
1972 #if NBPFILTER > 0
1973 /* Pass this up to any BPF listeners. */
1974 if (ifp->if_bpf)
1975 bpf_mtap(ifp->if_bpf, m);
1976 #endif /* NBPFILTER > 0 */
1977
1978 /* Pass it on. */
1979 (*ifp->if_input)(ifp, m);
1980 }
1981
1982 /* Update the receive pointer. */
1983 sc->sc_rxptr = i;
1984
1985 DPRINTF(WM_DEBUG_RX,
1986 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1987 }
1988
1989 /*
1990 * wm_linkintr:
1991 *
1992 * Helper; handle link interrupts.
1993 */
1994 static void
1995 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1996 {
1997 uint32_t status;
1998
1999 /*
2000 * If we get a link status interrupt on a 1000BASE-T
2001 * device, just fall into the normal MII tick path.
2002 */
2003 if (sc->sc_flags & WM_F_HAS_MII) {
2004 if (icr & ICR_LSC) {
2005 DPRINTF(WM_DEBUG_LINK,
2006 ("%s: LINK: LSC -> mii_tick\n",
2007 sc->sc_dev.dv_xname));
2008 mii_tick(&sc->sc_mii);
2009 } else if (icr & ICR_RXSEQ) {
2010 DPRINTF(WM_DEBUG_LINK,
2011 			    ("%s: LINK: Receive sequence error\n",
2012 sc->sc_dev.dv_xname));
2013 }
2014 return;
2015 }
2016
2017 /*
2018 * If we are now receiving /C/, check for link again in
2019 * a couple of link clock ticks.
2020 */
2021 if (icr & ICR_RXCFG) {
2022 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2023 sc->sc_dev.dv_xname));
2024 sc->sc_tbi_anstate = 2;
2025 }
2026
2027 if (icr & ICR_LSC) {
2028 status = CSR_READ(sc, WMREG_STATUS);
2029 if (status & STATUS_LU) {
2030 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2031 sc->sc_dev.dv_xname,
2032 (status & STATUS_FD) ? "FDX" : "HDX"));
2033 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2034 if (status & STATUS_FD)
2035 sc->sc_tctl |=
2036 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2037 else
2038 sc->sc_tctl |=
2039 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2040 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2041 sc->sc_tbi_linkup = 1;
2042 } else {
2043 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2044 sc->sc_dev.dv_xname));
2045 sc->sc_tbi_linkup = 0;
2046 }
2047 sc->sc_tbi_anstate = 2;
2048 wm_tbi_set_linkled(sc);
2049 } else if (icr & ICR_RXSEQ) {
2050 DPRINTF(WM_DEBUG_LINK,
2051 ("%s: LINK: Receive sequence error\n",
2052 sc->sc_dev.dv_xname));
2053 }
2054 }
2055
2056 /*
2057 * wm_tick:
2058 *
2059 * One second timer, used to check link status, sweep up
2060 * completed transmit jobs, etc.
2061 */
2062 static void
2063 wm_tick(void *arg)
2064 {
2065 struct wm_softc *sc = arg;
2066 int s;
2067
2068 s = splnet();
2069
2070 if (sc->sc_flags & WM_F_HAS_MII)
2071 mii_tick(&sc->sc_mii);
2072 else
2073 wm_tbi_check_link(sc);
2074
2075 splx(s);
2076
2077 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2078 }
2079
2080 /*
2081 * wm_reset:
2082 *
2083  * Reset the i8254x chip.
2084 */
2085 static void
2086 wm_reset(struct wm_softc *sc)
2087 {
2088 int i;
2089
2090 switch (sc->sc_type) {
2091 case WM_T_82544:
2092 case WM_T_82540:
2093 case WM_T_82545:
2094 case WM_T_82546:
2095 case WM_T_82541:
2096 case WM_T_82541_2:
2097 /*
2098 * These chips have a problem with the memory-mapped
2099 * write cycle when issuing the reset, so use I/O-mapped
2100 * access, if possible.
2101 */
2102 if (sc->sc_flags & WM_F_IOH_VALID)
2103 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2104 else
2105 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2106 break;
2107
2108 case WM_T_82545_3:
2109 case WM_T_82546_3:
2110 /* Use the shadow control register on these chips. */
2111 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2112 break;
2113
2114 default:
2115 /* Everything else can safely use the documented method. */
2116 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2117 break;
2118 }
2119 delay(10000);
2120
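	/*
	 * CTRL_RST self-clears when the reset completes; poll for up to
	 * an additional 20ms (1000 * 20us) before complaining.
	 */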
2121 for (i = 0; i < 1000; i++) {
2122 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2123 return;
2124 delay(20);
2125 }
2126
2127 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2128 printf("%s: WARNING: reset failed to complete\n",
2129 sc->sc_dev.dv_xname);
2130 }
2131
2132 /*
2133 * wm_init: [ifnet interface function]
2134 *
2135 * Initialize the interface. Must be called at splnet().
2136 */
2137 static int
2138 wm_init(struct ifnet *ifp)
2139 {
2140 struct wm_softc *sc = ifp->if_softc;
2141 struct wm_rxsoft *rxs;
2142 int i, error = 0;
2143 uint32_t reg;
2144
2145 /*
2146 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2147 	 * There is a small but measurable benefit to avoiding the adjustment
2148 	 * of the descriptor so that the headers are aligned, for normal MTU,
2149 * on such platforms. One possibility is that the DMA itself is
2150 * slightly more efficient if the front of the entire packet (instead
2151 * of the front of the headers) is aligned.
2152 *
2153 * Note we must always set align_tweak to 0 if we are using
2154 * jumbo frames.
2155 */
2156 #ifdef __NO_STRICT_ALIGNMENT
2157 sc->sc_align_tweak = 0;
2158 #else
2159 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2160 sc->sc_align_tweak = 0;
2161 else
2162 sc->sc_align_tweak = 2;
2163 #endif /* __NO_STRICT_ALIGNMENT */
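	/*
	 * (A 2-byte tweak offsets the 14-byte Ethernet header so that
	 * the IP header following it lands on a 4-byte boundary.)
	 */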
2164
2165 /* Cancel any pending I/O. */
2166 wm_stop(ifp, 0);
2167
2168 /* Reset the chip to a known state. */
2169 wm_reset(sc);
2170
2171 /* Initialize the transmit descriptor ring. */
2172 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2173 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2174 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2175 sc->sc_txfree = WM_NTXDESC;
2176 sc->sc_txnext = 0;
2177
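	/*
	 * Invalidate the cached transmit checksum contexts so that the
	 * first packet forces a fresh one to be set up.
	 */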
2178 sc->sc_txctx_ipcs = 0xffffffff;
2179 sc->sc_txctx_tucs = 0xffffffff;
2180
2181 if (sc->sc_type < WM_T_82543) {
2182 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2183 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2184 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2185 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2186 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2187 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2188 } else {
2189 CSR_WRITE(sc, WMREG_TBDAH, 0);
2190 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2191 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2192 CSR_WRITE(sc, WMREG_TDH, 0);
2193 CSR_WRITE(sc, WMREG_TDT, 0);
2194 CSR_WRITE(sc, WMREG_TIDV, 128);
2195
2196 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2197 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2198 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2199 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2200 }
2201 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2202 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2203
2204 /* Initialize the transmit job descriptors. */
2205 for (i = 0; i < WM_TXQUEUELEN; i++)
2206 sc->sc_txsoft[i].txs_mbuf = NULL;
2207 sc->sc_txsfree = WM_TXQUEUELEN;
2208 sc->sc_txsnext = 0;
2209 sc->sc_txsdirty = 0;
2210
2211 /*
2212 * Initialize the receive descriptor and receive job
2213 * descriptor rings.
2214 */
2215 if (sc->sc_type < WM_T_82543) {
2216 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2217 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2218 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2219 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2220 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2221 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2222
2223 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2224 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2225 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2226 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2227 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2228 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2229 } else {
2230 CSR_WRITE(sc, WMREG_RDBAH, 0);
2231 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2232 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2233 CSR_WRITE(sc, WMREG_RDH, 0);
2234 CSR_WRITE(sc, WMREG_RDT, 0);
2235 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2236 }
2237 for (i = 0; i < WM_NRXDESC; i++) {
2238 rxs = &sc->sc_rxsoft[i];
2239 if (rxs->rxs_mbuf == NULL) {
2240 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2241 printf("%s: unable to allocate or map rx "
2242 "buffer %d, error = %d\n",
2243 sc->sc_dev.dv_xname, i, error);
2244 /*
2245 * XXX Should attempt to run with fewer receive
2246 * XXX buffers instead of just failing.
2247 */
2248 wm_rxdrain(sc);
2249 goto out;
2250 }
2251 } else
2252 WM_INIT_RXDESC(sc, i);
2253 }
2254 sc->sc_rxptr = 0;
2255 sc->sc_rxdiscard = 0;
2256 WM_RXCHAIN_RESET(sc);
2257
2258 /*
2259 * Clear out the VLAN table -- we don't use it (yet).
2260 */
2261 CSR_WRITE(sc, WMREG_VET, 0);
2262 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2263 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2264
2265 /*
2266 * Set up flow-control parameters.
2267 *
2268 * XXX Values could probably stand some tuning.
2269 */
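	/*
	 * FCAL/FCAH/FCT hold the 802.3x PAUSE destination address and
	 * Ethertype that the chip matches on incoming flow-control frames.
	 */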
2270 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2271 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2272 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2273 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2274
2275 if (sc->sc_type < WM_T_82543) {
2276 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2277 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2278 } else {
2279 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2280 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2281 }
2282 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2283 }
2284
2285 #if 0 /* XXXJRT */
2286 /* Deal with VLAN enables. */
2287 if (sc->sc_ethercom.ec_nvlans != 0)
2288 sc->sc_ctrl |= CTRL_VME;
2289 else
2290 #endif /* XXXJRT */
2291 sc->sc_ctrl &= ~CTRL_VME;
2292
2293 /* Write the control registers. */
2294 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2295 #if 0
2296 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2297 #endif
2298
2299 /*
2300 * Set up checksum offload parameters.
2301 */
2302 reg = CSR_READ(sc, WMREG_RXCSUM);
2303 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2304 reg |= RXCSUM_IPOFL;
2305 else
2306 reg &= ~RXCSUM_IPOFL;
2307 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2308 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2309 else {
2310 reg &= ~RXCSUM_TUOFL;
2311 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2312 reg &= ~RXCSUM_IPOFL;
2313 }
2314 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2315
2316 /*
2317 * Set up the interrupt registers.
2318 */
2319 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2320 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2321 ICR_RXO | ICR_RXT0;
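	/* RXCFG (receipt of /C/ ordered sets) only matters for TBI
	   link handling. */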
2322 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2323 sc->sc_icr |= ICR_RXCFG;
2324 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2325
2326 /* Set up the inter-packet gap. */
2327 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2328
2329 #if 0 /* XXXJRT */
2330 /* Set the VLAN ethernetype. */
2331 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2332 #endif
2333
2334 /*
2335 * Set up the transmit control register; we start out with
2336 	 * a collision distance suitable for FDX, but update it when
2337 * we resolve the media type.
2338 */
2339 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2340 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2341 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2342
2343 /* Set the media. */
2344 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2345
2346 /*
2347 * Set up the receive control register; we actually program
2348 * the register when we set the receive filter. Use multicast
2349 * address offset type 0.
2350 *
2351 * Only the i82544 has the ability to strip the incoming
2352 * CRC, so we don't enable that feature.
2353 */
2354 sc->sc_mchash_type = 0;
2355 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2356 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2357
2358 	if (MCLBYTES == 2048) {
2359 sc->sc_rctl |= RCTL_2k;
2360 } else {
2361 /*
2362 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2363 * XXX segments, dropping" -- why?
2364 */
2365 #if 0
2366 		if (sc->sc_type >= WM_T_82543) {
2367 			switch (MCLBYTES) {
2368 case 4096:
2369 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2370 break;
2371 case 8192:
2372 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2373 break;
2374 case 16384:
2375 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2376 break;
2377 default:
2378 panic("wm_init: MCLBYTES %d unsupported",
2379 MCLBYTES);
2380 break;
2381 }
2382 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2383 #else
2384 panic("wm_init: MCLBYTES > 2048 not supported.");
2385 #endif
2386 }
2387
2388 /* Set the receive filter. */
2389 wm_set_filter(sc);
2390
2391 /* Start the one second link check clock. */
2392 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2393
2394 /* ...all done! */
2395 ifp->if_flags |= IFF_RUNNING;
2396 ifp->if_flags &= ~IFF_OACTIVE;
2397
2398 out:
2399 if (error)
2400 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2401 return (error);
2402 }
2403
2404 /*
2405 * wm_rxdrain:
2406 *
2407 * Drain the receive queue.
2408 */
2409 static void
2410 wm_rxdrain(struct wm_softc *sc)
2411 {
2412 struct wm_rxsoft *rxs;
2413 int i;
2414
2415 for (i = 0; i < WM_NRXDESC; i++) {
2416 rxs = &sc->sc_rxsoft[i];
2417 if (rxs->rxs_mbuf != NULL) {
2418 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2419 m_freem(rxs->rxs_mbuf);
2420 rxs->rxs_mbuf = NULL;
2421 }
2422 }
2423 }
2424
2425 /*
2426 * wm_stop: [ifnet interface function]
2427 *
2428 * Stop transmission on the interface.
2429 */
2430 static void
2431 wm_stop(struct ifnet *ifp, int disable)
2432 {
2433 struct wm_softc *sc = ifp->if_softc;
2434 struct wm_txsoft *txs;
2435 int i;
2436
2437 /* Stop the one second clock. */
2438 callout_stop(&sc->sc_tick_ch);
2439
2440 if (sc->sc_flags & WM_F_HAS_MII) {
2441 /* Down the MII. */
2442 mii_down(&sc->sc_mii);
2443 }
2444
2445 /* Stop the transmit and receive processes. */
2446 CSR_WRITE(sc, WMREG_TCTL, 0);
2447 CSR_WRITE(sc, WMREG_RCTL, 0);
2448
2449 /* Release any queued transmit buffers. */
2450 for (i = 0; i < WM_TXQUEUELEN; i++) {
2451 txs = &sc->sc_txsoft[i];
2452 if (txs->txs_mbuf != NULL) {
2453 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2454 m_freem(txs->txs_mbuf);
2455 txs->txs_mbuf = NULL;
2456 }
2457 }
2458
2459 if (disable)
2460 wm_rxdrain(sc);
2461
2462 /* Mark the interface as down and cancel the watchdog timer. */
2463 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2464 ifp->if_timer = 0;
2465 }
2466
2467 /*
2468 * wm_acquire_eeprom:
2469 *
2470 * Perform the EEPROM handshake required on some chips.
2471 */
2472 static int
2473 wm_acquire_eeprom(struct wm_softc *sc)
2474 {
2475 uint32_t reg;
2476 int x;
2477
2478 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2479 reg = CSR_READ(sc, WMREG_EECD);
2480
2481 /* Request EEPROM access. */
2482 reg |= EECD_EE_REQ;
2483 CSR_WRITE(sc, WMREG_EECD, reg);
2484
2485 		/* ...and wait for it to be granted. */
2486 for (x = 0; x < 100; x++) {
2487 reg = CSR_READ(sc, WMREG_EECD);
2488 if (reg & EECD_EE_GNT)
2489 break;
2490 delay(5);
2491 }
2492 if ((reg & EECD_EE_GNT) == 0) {
2493 aprint_error("%s: could not acquire EEPROM GNT\n",
2494 sc->sc_dev.dv_xname);
2495 reg &= ~EECD_EE_REQ;
2496 CSR_WRITE(sc, WMREG_EECD, reg);
2497 return (1);
2498 }
2499 }
2500
2501 return (0);
2502 }
2503
2504 /*
2505 * wm_release_eeprom:
2506 *
2507 * Release the EEPROM mutex.
2508 */
2509 static void
2510 wm_release_eeprom(struct wm_softc *sc)
2511 {
2512 uint32_t reg;
2513
2514 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2515 reg = CSR_READ(sc, WMREG_EECD);
2516 reg &= ~EECD_EE_REQ;
2517 CSR_WRITE(sc, WMREG_EECD, reg);
2518 }
2519 }
2520
2521 /*
2522 * wm_eeprom_sendbits:
2523 *
2524 * Send a series of bits to the EEPROM.
2525 */
2526 static void
2527 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2528 {
2529 uint32_t reg;
2530 int x;
2531
2532 reg = CSR_READ(sc, WMREG_EECD);
2533
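	/* Shift the bits out MSB-first: present each on DI, then pulse SK. */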
2534 for (x = nbits; x > 0; x--) {
2535 if (bits & (1U << (x - 1)))
2536 reg |= EECD_DI;
2537 else
2538 reg &= ~EECD_DI;
2539 CSR_WRITE(sc, WMREG_EECD, reg);
2540 delay(2);
2541 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2542 delay(2);
2543 CSR_WRITE(sc, WMREG_EECD, reg);
2544 delay(2);
2545 }
2546 }
2547
2548 /*
2549 * wm_eeprom_recvbits:
2550 *
2551 * Receive a series of bits from the EEPROM.
2552 */
2553 static void
2554 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2555 {
2556 uint32_t reg, val;
2557 int x;
2558
2559 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2560
2561 val = 0;
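	/* Shift the bits in MSB-first, sampling DO while SK is high. */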
2562 for (x = nbits; x > 0; x--) {
2563 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2564 delay(2);
2565 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2566 val |= (1U << (x - 1));
2567 CSR_WRITE(sc, WMREG_EECD, reg);
2568 delay(2);
2569 }
2570 *valp = val;
2571 }
2572
2573 /*
2574 * wm_read_eeprom_uwire:
2575 *
2576 * Read a word from the EEPROM using the MicroWire protocol.
2577 */
2578 static int
2579 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2580 {
2581 uint32_t reg, val;
2582 int i;
2583
2584 for (i = 0; i < wordcnt; i++) {
2585 /* Clear SK and DI. */
2586 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2587 CSR_WRITE(sc, WMREG_EECD, reg);
2588
2589 /* Set CHIP SELECT. */
2590 reg |= EECD_CS;
2591 CSR_WRITE(sc, WMREG_EECD, reg);
2592 delay(2);
2593
2594 /* Shift in the READ command. */
2595 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2596
2597 /* Shift in address. */
2598 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2599
2600 /* Shift out the data. */
2601 wm_eeprom_recvbits(sc, &val, 16);
2602 data[i] = val & 0xffff;
2603
2604 /* Clear CHIP SELECT. */
2605 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2606 CSR_WRITE(sc, WMREG_EECD, reg);
2607 delay(2);
2608 }
2609
2610 return (0);
2611 }
2612
2613 /*
2614 * wm_read_eeprom:
2615 *
2616 * Read data from the serial EEPROM.
2617 */
2618 static int
2619 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2620 {
2621 int rv;
2622
2623 if (wm_acquire_eeprom(sc))
2624 return (1);
2625
2626 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2627
2628 wm_release_eeprom(sc);
2629 return (rv);
2630 }
2631
2632 /*
2633 * wm_add_rxbuf:
2634 *
2635  * Add a receive buffer to the indicated descriptor.
2636 */
2637 static int
2638 wm_add_rxbuf(struct wm_softc *sc, int idx)
2639 {
2640 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2641 struct mbuf *m;
2642 int error;
2643
2644 MGETHDR(m, M_DONTWAIT, MT_DATA);
2645 if (m == NULL)
2646 return (ENOBUFS);
2647
2648 MCLGET(m, M_DONTWAIT);
2649 if ((m->m_flags & M_EXT) == 0) {
2650 m_freem(m);
2651 return (ENOBUFS);
2652 }
2653
2654 if (rxs->rxs_mbuf != NULL)
2655 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2656
2657 rxs->rxs_mbuf = m;
2658
2659 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2660 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2661 BUS_DMA_READ|BUS_DMA_NOWAIT);
2662 if (error) {
2663 printf("%s: unable to load rx DMA map %d, error = %d\n",
2664 sc->sc_dev.dv_xname, idx, error);
2665 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2666 }
2667
2668 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2669 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2670
2671 WM_INIT_RXDESC(sc, idx);
2672
2673 return (0);
2674 }
2675
2676 /*
2677 * wm_set_ral:
2678 *
2679  * Set an entry in the receive address list.
2680 */
2681 static void
2682 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2683 {
2684 uint32_t ral_lo, ral_hi;
2685
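	/*
	 * Pack the address into the RAL/RAH pair; RAL_AV in the high
	 * word marks the entry valid.  A NULL address clears the slot.
	 */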
2686 if (enaddr != NULL) {
2687 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2688 (enaddr[3] << 24);
2689 ral_hi = enaddr[4] | (enaddr[5] << 8);
2690 ral_hi |= RAL_AV;
2691 } else {
2692 ral_lo = 0;
2693 ral_hi = 0;
2694 }
2695
2696 if (sc->sc_type >= WM_T_82544) {
2697 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2698 ral_lo);
2699 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2700 ral_hi);
2701 } else {
2702 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2703 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2704 }
2705 }
2706
2707 /*
2708 * wm_mchash:
2709 *
2710 * Compute the hash of the multicast address for the 4096-bit
2711 * multicast filter.
2712 */
2713 static uint32_t
2714 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2715 {
2716 static const int lo_shift[4] = { 4, 3, 2, 0 };
2717 static const int hi_shift[4] = { 4, 5, 6, 8 };
2718 uint32_t hash;
2719
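	/*
	 * The 12-bit hash is built from the last two bytes of the
	 * address; the multicast offset type selects one of four
	 * defined bit alignments.  For example, 01:00:5e:00:00:01
	 * with type 0 hashes to (0x00 >> 4) | (0x01 << 4) = 0x010.
	 */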
2720 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2721 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2722
2723 return (hash & 0xfff);
2724 }
2725
2726 /*
2727 * wm_set_filter:
2728 *
2729 * Set up the receive filter.
2730 */
2731 static void
2732 wm_set_filter(struct wm_softc *sc)
2733 {
2734 struct ethercom *ec = &sc->sc_ethercom;
2735 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2736 struct ether_multi *enm;
2737 struct ether_multistep step;
2738 bus_addr_t mta_reg;
2739 uint32_t hash, reg, bit;
2740 int i;
2741
2742 if (sc->sc_type >= WM_T_82544)
2743 mta_reg = WMREG_CORDOVA_MTA;
2744 else
2745 mta_reg = WMREG_MTA;
2746
2747 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2748
2749 if (ifp->if_flags & IFF_BROADCAST)
2750 sc->sc_rctl |= RCTL_BAM;
2751 if (ifp->if_flags & IFF_PROMISC) {
2752 sc->sc_rctl |= RCTL_UPE;
2753 goto allmulti;
2754 }
2755
2756 /*
2757 * Set the station address in the first RAL slot, and
2758 * clear the remaining slots.
2759 */
2760 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2761 for (i = 1; i < WM_RAL_TABSIZE; i++)
2762 wm_set_ral(sc, NULL, i);
2763
2764 /* Clear out the multicast table. */
2765 for (i = 0; i < WM_MC_TABSIZE; i++)
2766 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2767
2768 ETHER_FIRST_MULTI(step, ec, enm);
2769 while (enm != NULL) {
2770 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2771 /*
2772 * We must listen to a range of multicast addresses.
2773 * For now, just accept all multicasts, rather than
2774 * trying to set only those filter bits needed to match
2775 * the range. (At this time, the only use of address
2776 * ranges is for IP multicast routing, for which the
2777 * range is big enough to require all bits set.)
2778 */
2779 goto allmulti;
2780 }
2781
2782 hash = wm_mchash(sc, enm->enm_addrlo);
2783
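		/*
		 * The 4096-bit table is 128 32-bit registers: the upper
		 * 7 bits of the hash select the register, the low 5
		 * bits the bit within it.
		 */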
2784 reg = (hash >> 5) & 0x7f;
2785 bit = hash & 0x1f;
2786
2787 hash = CSR_READ(sc, mta_reg + (reg << 2));
2788 hash |= 1U << bit;
2789
2790 /* XXX Hardware bug?? */
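		/*
		 * On the 82544, writing this MTA word appears to disturb
		 * its neighbour, so save the previous word and rewrite
		 * it after the update.
		 */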
2791 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2792 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2793 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2794 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2795 } else
2796 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2797
2798 ETHER_NEXT_MULTI(step, enm);
2799 }
2800
2801 ifp->if_flags &= ~IFF_ALLMULTI;
2802 goto setit;
2803
2804 allmulti:
2805 ifp->if_flags |= IFF_ALLMULTI;
2806 sc->sc_rctl |= RCTL_MPE;
2807
2808 setit:
2809 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2810 }
2811
2812 /*
2813 * wm_tbi_mediainit:
2814 *
2815 * Initialize media for use on 1000BASE-X devices.
2816 */
2817 static void
2818 wm_tbi_mediainit(struct wm_softc *sc)
2819 {
2820 const char *sep = "";
2821
2822 if (sc->sc_type < WM_T_82543)
2823 sc->sc_tipg = TIPG_WM_DFLT;
2824 else
2825 sc->sc_tipg = TIPG_LG_DFLT;
2826
2827 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2828 wm_tbi_mediastatus);
2829
2830 /*
2831 * SWD Pins:
2832 *
2833 * 0 = Link LED (output)
2834 * 1 = Loss Of Signal (input)
2835 */
2836 sc->sc_ctrl |= CTRL_SWDPIO(0);
2837 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2838
2839 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2840
2841 #define ADD(ss, mm, dd) \
2842 do { \
2843 printf("%s%s", sep, ss); \
2844 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2845 sep = ", "; \
2846 } while (/*CONSTCOND*/0)
2847
2848 printf("%s: ", sc->sc_dev.dv_xname);
2849 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2850 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2851 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2852 printf("\n");
2853
2854 #undef ADD
2855
2856 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2857 }
2858
2859 /*
2860 * wm_tbi_mediastatus: [ifmedia interface function]
2861 *
2862 * Get the current interface media status on a 1000BASE-X device.
2863 */
2864 static void
2865 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2866 {
2867 struct wm_softc *sc = ifp->if_softc;
2868
2869 ifmr->ifm_status = IFM_AVALID;
2870 ifmr->ifm_active = IFM_ETHER;
2871
2872 if (sc->sc_tbi_linkup == 0) {
2873 ifmr->ifm_active |= IFM_NONE;
2874 return;
2875 }
2876
2877 ifmr->ifm_status |= IFM_ACTIVE;
2878 ifmr->ifm_active |= IFM_1000_SX;
2879 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2880 ifmr->ifm_active |= IFM_FDX;
2881 }
2882
2883 /*
2884 * wm_tbi_mediachange: [ifmedia interface function]
2885 *
2886 * Set hardware to newly-selected media on a 1000BASE-X device.
2887 */
2888 static int
2889 wm_tbi_mediachange(struct ifnet *ifp)
2890 {
2891 struct wm_softc *sc = ifp->if_softc;
2892 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2893 uint32_t status;
2894 int i;
2895
2896 sc->sc_txcw = ife->ifm_data;
2897 if (sc->sc_ctrl & CTRL_RFCE)
2898 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2899 if (sc->sc_ctrl & CTRL_TFCE)
2900 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2901 sc->sc_txcw |= TXCW_ANE;
2902
2903 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2904 delay(10000);
2905
2906 sc->sc_tbi_anstate = 0;
2907
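	/* SWD pin 1 is Loss Of Signal; clear means we have signal. */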
2908 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2909 /* Have signal; wait for the link to come up. */
2910 for (i = 0; i < 50; i++) {
2911 delay(10000);
2912 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2913 break;
2914 }
2915
2916 status = CSR_READ(sc, WMREG_STATUS);
2917 if (status & STATUS_LU) {
2918 /* Link is up. */
2919 DPRINTF(WM_DEBUG_LINK,
2920 ("%s: LINK: set media -> link up %s\n",
2921 sc->sc_dev.dv_xname,
2922 (status & STATUS_FD) ? "FDX" : "HDX"));
2923 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2924 if (status & STATUS_FD)
2925 sc->sc_tctl |=
2926 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2927 else
2928 sc->sc_tctl |=
2929 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2930 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2931 sc->sc_tbi_linkup = 1;
2932 } else {
2933 /* Link is down. */
2934 DPRINTF(WM_DEBUG_LINK,
2935 ("%s: LINK: set media -> link down\n",
2936 sc->sc_dev.dv_xname));
2937 sc->sc_tbi_linkup = 0;
2938 }
2939 } else {
2940 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2941 sc->sc_dev.dv_xname));
2942 sc->sc_tbi_linkup = 0;
2943 }
2944
2945 wm_tbi_set_linkled(sc);
2946
2947 return (0);
2948 }
2949
2950 /*
2951 * wm_tbi_set_linkled:
2952 *
2953 * Update the link LED on 1000BASE-X devices.
2954 */
2955 static void
2956 wm_tbi_set_linkled(struct wm_softc *sc)
2957 {
2958
2959 if (sc->sc_tbi_linkup)
2960 sc->sc_ctrl |= CTRL_SWDPIN(0);
2961 else
2962 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2963
2964 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2965 }
2966
2967 /*
2968 * wm_tbi_check_link:
2969 *
2970 * Check the link on 1000BASE-X devices.
2971 */
2972 static void
2973 wm_tbi_check_link(struct wm_softc *sc)
2974 {
2975 uint32_t rxcw, ctrl, status;
2976
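	/*
	 * sc_tbi_anstate is set to 2 by the link interrupt handler;
	 * count it down here so autonegotiation has a couple of ticks
	 * to settle before we sample the link state.
	 */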
2977 if (sc->sc_tbi_anstate == 0)
2978 return;
2979 else if (sc->sc_tbi_anstate > 1) {
2980 DPRINTF(WM_DEBUG_LINK,
2981 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2982 sc->sc_tbi_anstate));
2983 sc->sc_tbi_anstate--;
2984 return;
2985 }
2986
2987 sc->sc_tbi_anstate = 0;
2988
2989 rxcw = CSR_READ(sc, WMREG_RXCW);
2990 ctrl = CSR_READ(sc, WMREG_CTRL);
2991 status = CSR_READ(sc, WMREG_STATUS);
2992
2993 if ((status & STATUS_LU) == 0) {
2994 DPRINTF(WM_DEBUG_LINK,
2995 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2996 sc->sc_tbi_linkup = 0;
2997 } else {
2998 DPRINTF(WM_DEBUG_LINK,
2999 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3000 (status & STATUS_FD) ? "FDX" : "HDX"));
3001 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3002 if (status & STATUS_FD)
3003 sc->sc_tctl |=
3004 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3005 else
3006 sc->sc_tctl |=
3007 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3008 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3009 sc->sc_tbi_linkup = 1;
3010 }
3011
3012 wm_tbi_set_linkled(sc);
3013 }
3014
3015 /*
3016 * wm_gmii_reset:
3017 *
3018 * Reset the PHY.
3019 */
3020 static void
3021 wm_gmii_reset(struct wm_softc *sc)
3022 {
3023 uint32_t reg;
3024
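	/*
	 * The 82544 and later have a PHY reset bit in CTRL; on the
	 * 82543 the PHY is reset by toggling software-definable pin 4.
	 */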
3025 if (sc->sc_type >= WM_T_82544) {
3026 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3027 delay(20000);
3028
3029 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3030 delay(20000);
3031 } else {
3032 /* The PHY reset pin is active-low. */
3033 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3034 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3035 CTRL_EXT_SWDPIN(4));
3036 reg |= CTRL_EXT_SWDPIO(4);
3037
3038 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3039 delay(10);
3040
3041 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3042 delay(10);
3043
3044 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3045 delay(10);
3046 #if 0
3047 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3048 #endif
3049 }
3050 }
3051
3052 /*
3053 * wm_gmii_mediainit:
3054 *
3055 * Initialize media for use on 1000BASE-T devices.
3056 */
3057 static void
3058 wm_gmii_mediainit(struct wm_softc *sc)
3059 {
3060 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3061
3062 /* We have MII. */
3063 sc->sc_flags |= WM_F_HAS_MII;
3064
3065 sc->sc_tipg = TIPG_1000T_DFLT;
3066
3067 /*
3068 * Let the chip set speed/duplex on its own based on
3069 * signals from the PHY.
3070 */
3071 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3073
3074 /* Initialize our media structures and probe the GMII. */
3075 sc->sc_mii.mii_ifp = ifp;
3076
3077 if (sc->sc_type >= WM_T_82544) {
3078 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3079 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3080 } else {
3081 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3082 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3083 }
3084 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3085
3086 wm_gmii_reset(sc);
3087
3088 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3089 wm_gmii_mediastatus);
3090
3091 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3092 MII_OFFSET_ANY, 0);
3093 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3094 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3095 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3096 } else
3097 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3098 }
3099
3100 /*
3101 * wm_gmii_mediastatus: [ifmedia interface function]
3102 *
3103 * Get the current interface media status on a 1000BASE-T device.
3104 */
3105 static void
3106 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3107 {
3108 struct wm_softc *sc = ifp->if_softc;
3109
3110 mii_pollstat(&sc->sc_mii);
3111 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3112 ifmr->ifm_active = sc->sc_mii.mii_media_active;
3113 }
3114
3115 /*
3116 * wm_gmii_mediachange: [ifmedia interface function]
3117 *
3118 * Set hardware to newly-selected media on a 1000BASE-T device.
3119 */
3120 static int
3121 wm_gmii_mediachange(struct ifnet *ifp)
3122 {
3123 struct wm_softc *sc = ifp->if_softc;
3124
3125 if (ifp->if_flags & IFF_UP)
3126 mii_mediachg(&sc->sc_mii);
3127 return (0);
3128 }
3129
3130 #define MDI_IO CTRL_SWDPIN(2)
3131 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3132 #define MDI_CLK CTRL_SWDPIN(3)
3133
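/*
 * On the i82543, MII management is bit-banged through the
 * software-definable pins: pin 2 carries data, pin 3 the clock.
 */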
3134 static void
3135 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3136 {
3137 uint32_t i, v;
3138
3139 v = CSR_READ(sc, WMREG_CTRL);
3140 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3141 v |= MDI_DIR | CTRL_SWDPIO(3);
3142
3143 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3144 if (data & i)
3145 v |= MDI_IO;
3146 else
3147 v &= ~MDI_IO;
3148 CSR_WRITE(sc, WMREG_CTRL, v);
3149 delay(10);
3150 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3151 delay(10);
3152 CSR_WRITE(sc, WMREG_CTRL, v);
3153 delay(10);
3154 }
3155 }
3156
3157 static uint32_t
3158 i82543_mii_recvbits(struct wm_softc *sc)
3159 {
3160 uint32_t v, i, data = 0;
3161
3162 v = CSR_READ(sc, WMREG_CTRL);
3163 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3164 v |= CTRL_SWDPIO(3);
3165
3166 CSR_WRITE(sc, WMREG_CTRL, v);
3167 delay(10);
3168 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3169 delay(10);
3170 CSR_WRITE(sc, WMREG_CTRL, v);
3171 delay(10);
3172
3173 for (i = 0; i < 16; i++) {
3174 data <<= 1;
3175 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3176 delay(10);
3177 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3178 data |= 1;
3179 CSR_WRITE(sc, WMREG_CTRL, v);
3180 delay(10);
3181 }
3182
3183 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3184 delay(10);
3185 CSR_WRITE(sc, WMREG_CTRL, v);
3186 delay(10);
3187
3188 return (data);
3189 }
3190
3191 #undef MDI_IO
3192 #undef MDI_DIR
3193 #undef MDI_CLK
3194
3195 /*
3196 * wm_gmii_i82543_readreg: [mii interface function]
3197 *
3198 * Read a PHY register on the GMII (i82543 version).
3199 */
3200 static int
3201 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3202 {
3203 struct wm_softc *sc = (void *) self;
3204 int rv;
3205
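	/*
	 * Send the 32-bit preamble of ones, then the 14-bit read frame:
	 * start, read opcode, PHY address, register address.
	 */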
3206 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3207 i82543_mii_sendbits(sc, reg | (phy << 5) |
3208 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3209 rv = i82543_mii_recvbits(sc) & 0xffff;
3210
3211 DPRINTF(WM_DEBUG_GMII,
3212 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3213 sc->sc_dev.dv_xname, phy, reg, rv));
3214
3215 return (rv);
3216 }
3217
3218 /*
3219 * wm_gmii_i82543_writereg: [mii interface function]
3220 *
3221 * Write a PHY register on the GMII (i82543 version).
3222 */
3223 static void
3224 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3225 {
3226 struct wm_softc *sc = (void *) self;
3227
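	/*
	 * Send the preamble, then the full 32-bit write frame: start,
	 * write opcode, PHY address, register address, turnaround, data.
	 */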
3228 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3229 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3230 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3231 (MII_COMMAND_START << 30), 32);
3232 }
3233
3234 /*
3235 * wm_gmii_i82544_readreg: [mii interface function]
3236 *
3237 * Read a PHY register on the GMII.
3238 */
3239 static int
3240 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3241 {
3242 struct wm_softc *sc = (void *) self;
3243 uint32_t mdic;
3244 int i, rv;
3245
3246 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3247 MDIC_REGADD(reg));
3248
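	/* Wait for the transaction to complete (MDIC_READY). */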
3249 for (i = 0; i < 100; i++) {
3250 mdic = CSR_READ(sc, WMREG_MDIC);
3251 if (mdic & MDIC_READY)
3252 break;
3253 delay(10);
3254 }
3255
3256 if ((mdic & MDIC_READY) == 0) {
3257 printf("%s: MDIC read timed out: phy %d reg %d\n",
3258 sc->sc_dev.dv_xname, phy, reg);
3259 rv = 0;
3260 } else if (mdic & MDIC_E) {
3261 #if 0 /* This is normal if no PHY is present. */
3262 printf("%s: MDIC read error: phy %d reg %d\n",
3263 sc->sc_dev.dv_xname, phy, reg);
3264 #endif
3265 rv = 0;
3266 } else {
3267 rv = MDIC_DATA(mdic);
3268 if (rv == 0xffff)
3269 rv = 0;
3270 }
3271
3272 return (rv);
3273 }
3274
3275 /*
3276 * wm_gmii_i82544_writereg: [mii interface function]
3277 *
3278 * Write a PHY register on the GMII.
3279 */
3280 static void
3281 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3282 {
3283 struct wm_softc *sc = (void *) self;
3284 uint32_t mdic;
3285 int i;
3286
3287 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3288 MDIC_REGADD(reg) | MDIC_DATA(val));
3289
3290 for (i = 0; i < 100; i++) {
3291 mdic = CSR_READ(sc, WMREG_MDIC);
3292 if (mdic & MDIC_READY)
3293 break;
3294 delay(10);
3295 }
3296
3297 if ((mdic & MDIC_READY) == 0)
3298 printf("%s: MDIC write timed out: phy %d reg %d\n",
3299 sc->sc_dev.dv_xname, phy, reg);
3300 else if (mdic & MDIC_E)
3301 printf("%s: MDIC write error: phy %d reg %d\n",
3302 sc->sc_dev.dv_xname, phy, reg);
3303 }
3304
3305 /*
3306 * wm_gmii_statchg: [mii interface function]
3307 *
3308 * Callback from MII layer when media changes.
3309 */
3310 static void
3311 wm_gmii_statchg(struct device *self)
3312 {
3313 struct wm_softc *sc = (void *) self;
3314
3315 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3316
3317 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3318 DPRINTF(WM_DEBUG_LINK,
3319 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3320 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3321 } else {
3322 DPRINTF(WM_DEBUG_LINK,
3323 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3324 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3325 }
3326
3327 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3328 }
3329