/*	$NetBSD: if_wm.c,v 1.76 2004/08/21 22:23:13 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Figure out what to do with the i82545GM and i82546GB
 *	  SERDES controllers.
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.76 2004/08/21 22:23:13 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82544];
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */
	int sc_flowflags;		/* 802.3x flow control flags */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

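/*
 * Helper macros for assembling a chain of Rx mbufs into a packet:
 * sc_rxtailp always points at the m_next field to be filled in next,
 * so a buffer can be appended without walking the whole chain.
 */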
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_EEPROM_SPI		0x04	/* EEPROM is SPI */
#define	WM_F_IOH_VALID		0x10	/* I/O handle is valid */
#define	WM_F_BUS64		0x20	/* bus is 64-bit */
#define	WM_F_PCIX		0x40	/* bus is PCI-X */
#define	WM_F_CSA		0x80	/* bus is CSA */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static __inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

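/*
 * wm_io_write:
 *
 *	Write a device register through the I/O-mapped indirect access
 *	window: the register offset goes into the first longword of the
 *	I/O BAR, the value into the second (see the I/O BAR setup in
 *	wm_attach()).
 */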
static __inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

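/*
 * wm_set_dma_addr:
 *
 *	Split a bus address into the low and high halves of a
 *	little-endian 64-bit descriptor address field.
 */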
static __inline void
wm_set_dma_addr(__volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

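/*
 * wm_lookup:
 *
 *	Look up the PCI vendor/product pair in the wm_products table;
 *	returns NULL if the device is not one we support.
 */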
static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

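/*
 * wm_match:
 *
 *	Determine if the driver supports the device (autoconfiguration
 *	match function).
 */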
static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

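/*
 * wm_attach:
 *
 *	Attach the device: map the registers, establish the interrupt,
 *	allocate DMA resources, read the MAC address and configuration
 *	out of the EEPROM, and attach the network interface.
 */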
static void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	size_t cdata_size;
	const char *intrstr = NULL;
	const char *eetype;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	uint32_t reg;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			if (pci_mapreg_type(pa->pa_pc, pa->pa_tag, i) ==
			    PCI_MAPREG_TYPE_IO)
				break;
		}
		if (i == PCI_MAPREG_END)
			aprint_error("%s: WARNING: unable to find I/O BAR\n",
			    sc->sc_dev.dv_xname);
		else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
		    0, &sc->sc_iot, &sc->sc_ioh,
		    NULL, NULL) == 0)
			sc->sc_flags |= WM_F_IOH_VALID;
		else
			aprint_error("%s: WARNING: unable to map I/O space\n",
			    sc->sc_dev.dv_xname);
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose("%s: Communication Streaming Architecture\n",
		    sc->sc_dev.dv_xname);
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if (sc->sc_type >= WM_T_82544 &&
		    (reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
			    PCI_CAP_PCIX,
			    &sc->sc_pcix_offset, NULL) == 0)
				aprint_error("%s: unable to find PCIX "
				    "capability\n", sc->sc_dev.dv_xname);
			else if (sc->sc_type != WM_T_82545_3 &&
			    sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcix_offset + PCI_PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCI_PCIX_CMD_BYTECNT_MASK) >>
				    PCI_PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCI_PCIX_STATUS_MAXB_MASK) >>
				    PCI_PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose("%s: resetting PCI-X "
					    "MMRBC: %d -> %d\n",
					    sc->sc_dev.dv_xname,
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCI_PCIX_CMD_BYTECNT_MASK) |
					    (maxb << PCI_PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcix_offset + PCI_PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
			    : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error(
				    "%s: unknown PCIXSPD %d; assuming 66MHz\n",
				    sc->sc_dev.dv_xname,
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	cdata_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdata_size, PAGE_SIZE,
	    (bus_size_t) 0x100000000ULL,
	    &seg, 1, &rseg, 0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, cdata_size,
	    (caddr_t *)&sc->sc_control_data, 0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, cdata_size, 1, cdata_size,
	    0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, cdata_size, NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}


	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0,
		    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Get some information about the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
	if (sc->sc_type <= WM_T_82544)
		sc->sc_ee_addrbits = 6;
	else if (sc->sc_type <= WM_T_82546_3) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_SIZE)
			sc->sc_ee_addrbits = 8;
		else
			sc->sc_ee_addrbits = 6;
	} else if (sc->sc_type <= WM_T_82547_2) {
		reg = CSR_READ(sc, WMREG_EECD);
		if (reg & EECD_EE_TYPE) {
			sc->sc_flags |= WM_F_EEPROM_SPI;
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
		} else
			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
	} else {
		/* Assume everything else is SPI. */
		reg = CSR_READ(sc, WMREG_EECD);
		sc->sc_flags |= WM_F_EEPROM_SPI;
		sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
	}
	if (sc->sc_flags & WM_F_EEPROM_SPI)
		eetype = "SPI";
	else
		eetype = "MicroWire";
	aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
	    sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
	    sc->sc_ee_addrbits, eetype);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea)) {
		aprint_error("%s: unable to read Ethernet address\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
		aprint_error("%s: unable to read CFG1 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
		aprint_error("%s: unable to read CFG2 from EEPROM\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	if (sc->sc_type >= WM_T_82544) {
		if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
			aprint_error("%s: unable to read SWDPIN from EEPROM\n",
			    sc->sc_dev.dv_xname);
			return;
		}
	}

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++) {
		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
	}

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");

	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xoff");
	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_xon");
	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rx_macctl");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    cdata_size);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= WTX_IXSM;
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1);
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= WTX_TXSM;
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = htole32(ipcs);
		t->tcpip_tucs = htole32(tucs);
		t->tcpip_cmdlen = htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}

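/*
 * wm_dump_mbuf_chain:
 *
 *	Print each mbuf in a chain; used when a Tx packet consumes
 *	too many DMA segments.
 */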
static void
wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
{
	struct mbuf *m;
	int i;

	printf("%s: mbuf chain:\n", sc->sc_dev.dv_xname);
	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
		printf("\tm_data = %p, m_len = %d, m_flags = 0x%08x\n",
		    m->m_data, m->m_len, m->m_flags);
	printf("\t%d mbuf%s in chain\n", i, i == 1 ? "" : "s");
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				wm_dump_mbuf_chain(sc, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
1582 * incremented by 1 if we do checksum offload (a descriptor
1583 * is used to set the checksum context).
1584 */
1585 txs->txs_mbuf = m0;
1586 txs->txs_firstdesc = sc->sc_txnext;
1587 txs->txs_ndesc = dmamap->dm_nsegs;
1588
1589 /*
1590 * Set up checksum offload parameters for
1591 * this packet.
1592 */
1593 if (m0->m_pkthdr.csum_flags &
1594 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1595 if (wm_tx_cksum(sc, txs, &cksumcmd,
1596 &cksumfields) != 0) {
1597 /* Error message already displayed. */
1598 bus_dmamap_unload(sc->sc_dmat, dmamap);
1599 continue;
1600 }
1601 } else {
1602 cksumcmd = 0;
1603 cksumfields = 0;
1604 }
1605
1606 cksumcmd |= WTX_CMD_IDE;
1607
1608 /*
1609 * Initialize the transmit descriptor.
1610 */
1611 for (nexttx = sc->sc_txnext, seg = 0;
1612 seg < dmamap->dm_nsegs;
1613 seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
1614 wm_set_dma_addr(&sc->sc_txdescs[nexttx].wtx_addr,
1615 dmamap->dm_segs[seg].ds_addr);
1616 sc->sc_txdescs[nexttx].wtx_cmdlen =
1617 htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
1618 sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
1619 sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
1620 cksumfields;
1621 sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
1622 lasttx = nexttx;
1623
1624 DPRINTF(WM_DEBUG_TX,
1625 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1626 sc->sc_dev.dv_xname, nexttx,
1627 (u_int)le32toh(dmamap->dm_segs[seg].ds_addr),
1628 (u_int)le32toh(dmamap->dm_segs[seg].ds_len)));
1629 }
1630
1631 KASSERT(lasttx != -1);
1632
1633 /*
1634 * Set up the command byte on the last descriptor of
1635 * the packet. If we're in the interrupt delay window,
1636 * delay the interrupt.
1637 */
1638 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1639 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1640
1641 #if 0 /* XXXJRT */
1642 /*
1643 * If VLANs are enabled and the packet has a VLAN tag, set
1644 * up the descriptor to encapsulate the packet for us.
1645 *
1646 * This is only valid on the last descriptor of the packet.
1647 */
1648 if (sc->sc_ethercom.ec_nvlans != 0 &&
1649 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1650 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1651 htole32(WTX_CMD_VLE);
1652 sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
1653 = htole16(*(u_int *)(mtag + 1) & 0xffff);
1654 }
1655 #endif /* XXXJRT */
1656
1657 txs->txs_lastdesc = lasttx;
1658
1659 DPRINTF(WM_DEBUG_TX,
1660 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1661 lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
1662
1663 /* Sync the descriptors we're using. */
1664 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1665 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1666
1667 /* Give the packet to the chip. */
1668 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1669
1670 DPRINTF(WM_DEBUG_TX,
1671 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1672
1673 DPRINTF(WM_DEBUG_TX,
1674 ("%s: TX: finished transmitting packet, job %d\n",
1675 sc->sc_dev.dv_xname, sc->sc_txsnext));
1676
1677 /* Advance the tx pointer. */
1678 sc->sc_txfree -= txs->txs_ndesc;
1679 sc->sc_txnext = nexttx;
1680
1681 sc->sc_txsfree--;
1682 sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
1683
1684 #if NBPFILTER > 0
1685 /* Pass the packet to any BPF listeners. */
1686 if (ifp->if_bpf)
1687 bpf_mtap(ifp->if_bpf, m0);
1688 #endif /* NBPFILTER > 0 */
1689 }
1690
1691 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1692 /* No more slots; notify upper layer. */
1693 ifp->if_flags |= IFF_OACTIVE;
1694 }
1695
1696 if (sc->sc_txfree != ofree) {
1697 /* Set a watchdog timer in case the chip flakes out. */
1698 ifp->if_timer = 5;
1699 }
1700 }
1701
1702 /*
1703 * wm_watchdog: [ifnet interface function]
1704 *
1705 * Watchdog timer handler.
1706 */
1707 static void
1708 wm_watchdog(struct ifnet *ifp)
1709 {
1710 struct wm_softc *sc = ifp->if_softc;
1711
1712 /*
1713 * Since we're using delayed interrupts, sweep up
1714 * before we report an error.
1715 */
1716 wm_txintr(sc);
1717
1718 if (sc->sc_txfree != WM_NTXDESC(sc)) {
1719 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1720 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1721 sc->sc_txnext);
1722 ifp->if_oerrors++;
1723
1724 /* Reset the interface. */
1725 (void) wm_init(ifp);
1726 }
1727
1728 /* Try to get more packets going. */
1729 wm_start(ifp);
1730 }
1731
1732 /*
1733 * wm_ioctl: [ifnet interface function]
1734 *
1735 * Handle control requests from the operator.
1736 */
1737 static int
1738 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1739 {
1740 struct wm_softc *sc = ifp->if_softc;
1741 struct ifreq *ifr = (struct ifreq *) data;
1742 int s, error;
1743
1744 s = splnet();
1745
1746 switch (cmd) {
1747 case SIOCSIFMEDIA:
1748 case SIOCGIFMEDIA:
1749 /* Flow control requires full-duplex mode. */
1750 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
1751 (ifr->ifr_media & IFM_FDX) == 0)
1752 ifr->ifr_media &= ~IFM_ETH_FMASK;
1753 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
1754 if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
1755 /* We can do both TXPAUSE and RXPAUSE. */
1756 ifr->ifr_media |=
1757 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
1758 }
1759 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
1760 }
1761 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1762 break;
1763 default:
1764 error = ether_ioctl(ifp, cmd, data);
1765 if (error == ENETRESET) {
1766 /*
1767 * Multicast list has changed; set the hardware filter
1768 * accordingly.
1769 */
1770 wm_set_filter(sc);
1771 error = 0;
1772 }
1773 break;
1774 }
1775
1776 /* Try to get more packets going. */
1777 wm_start(ifp);
1778
1779 splx(s);
1780 return (error);
1781 }
1782
1783 /*
1784 * wm_intr:
1785 *
1786 * Interrupt service routine.
1787 */
1788 static int
1789 wm_intr(void *arg)
1790 {
1791 struct wm_softc *sc = arg;
1792 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1793 uint32_t icr;
1794 int wantinit, handled = 0;
1795
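	/*
	 * Loop until no interrupt causes remain.  Reading ICR
	 * acknowledges (clears) the causes it returns, so each pass
	 * works on a fresh snapshot of pending events.
	 */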
1796 for (wantinit = 0; wantinit == 0;) {
1797 icr = CSR_READ(sc, WMREG_ICR);
1798 if ((icr & sc->sc_icr) == 0)
1799 break;
1800
1801 #if 0 /*NRND > 0*/
1802 if (RND_ENABLED(&sc->rnd_source))
1803 rnd_add_uint32(&sc->rnd_source, icr);
1804 #endif
1805
1806 handled = 1;
1807
1808 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1809 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1810 DPRINTF(WM_DEBUG_RX,
1811 ("%s: RX: got Rx intr 0x%08x\n",
1812 sc->sc_dev.dv_xname,
1813 icr & (ICR_RXDMT0|ICR_RXT0)));
1814 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1815 }
1816 #endif
1817 wm_rxintr(sc);
1818
1819 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1820 if (icr & ICR_TXDW) {
1821 DPRINTF(WM_DEBUG_TX,
1822 ("%s: TX: got TXDW interrupt\n",
1823 sc->sc_dev.dv_xname));
1824 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1825 }
1826 #endif
1827 wm_txintr(sc);
1828
1829 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1830 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1831 wm_linkintr(sc, icr);
1832 }
1833
1834 if (icr & ICR_RXO) {
1835 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1836 wantinit = 1;
1837 }
1838 }
1839
1840 if (handled) {
1841 if (wantinit)
1842 wm_init(ifp);
1843
1844 /* Try to get more packets going. */
1845 wm_start(ifp);
1846 }
1847
1848 return (handled);
1849 }
1850
1851 /*
1852 * wm_txintr:
1853 *
1854 * Helper; handle transmit interrupts.
1855 */
1856 static void
1857 wm_txintr(struct wm_softc *sc)
1858 {
1859 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1860 struct wm_txsoft *txs;
1861 uint8_t status;
1862 int i;
1863
1864 ifp->if_flags &= ~IFF_OACTIVE;
1865
1866 /*
1867 * Go through the Tx list and free mbufs for those
1868 * frames which have been transmitted.
1869 */
1870 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
1871 i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
1872 txs = &sc->sc_txsoft[i];
1873
1874 DPRINTF(WM_DEBUG_TX,
1875 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1876
1877 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1878 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1879
1880 status =
1881 sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
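		/*
		 * If the descriptor-done bit is not yet set, the chip
		 * still owns this job; sync the descriptor back for
		 * the next poll and stop sweeping.
		 */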
1882 if ((status & WTX_ST_DD) == 0) {
1883 WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
1884 BUS_DMASYNC_PREREAD);
1885 break;
1886 }
1887
1888 DPRINTF(WM_DEBUG_TX,
1889 ("%s: TX: job %d done: descs %d..%d\n",
1890 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1891 txs->txs_lastdesc));
1892
1893 /*
1894 * XXX We should probably be using the statistics
1895 * XXX registers, but I don't know if they exist
1896 * XXX on chips before the i82544.
1897 */
1898
1899 #ifdef WM_EVENT_COUNTERS
1900 if (status & WTX_ST_TU)
1901 WM_EVCNT_INCR(&sc->sc_ev_tu);
1902 #endif /* WM_EVENT_COUNTERS */
1903
1904 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1905 ifp->if_oerrors++;
1906 if (status & WTX_ST_LC)
1907 printf("%s: late collision\n",
1908 sc->sc_dev.dv_xname);
1909 else if (status & WTX_ST_EC) {
1910 ifp->if_collisions += 16;
1911 printf("%s: excessive collisions\n",
1912 sc->sc_dev.dv_xname);
1913 }
1914 } else
1915 ifp->if_opackets++;
1916
1917 sc->sc_txfree += txs->txs_ndesc;
1918 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1919 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1920 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1921 m_freem(txs->txs_mbuf);
1922 txs->txs_mbuf = NULL;
1923 }
1924
1925 /* Update the dirty transmit buffer pointer. */
1926 sc->sc_txsdirty = i;
1927 DPRINTF(WM_DEBUG_TX,
1928 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1929
1930 /*
1931 * If there are no more pending transmissions, cancel the watchdog
1932 * timer.
1933 */
1934 if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
1935 ifp->if_timer = 0;
1936 }
1937
1938 /*
1939 * wm_rxintr:
1940 *
1941 * Helper; handle receive interrupts.
1942 */
1943 static void
1944 wm_rxintr(struct wm_softc *sc)
1945 {
1946 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1947 struct wm_rxsoft *rxs;
1948 struct mbuf *m;
1949 int i, len;
1950 uint8_t status, errors;
1951
1952 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1953 rxs = &sc->sc_rxsoft[i];
1954
1955 DPRINTF(WM_DEBUG_RX,
1956 ("%s: RX: checking descriptor %d\n",
1957 sc->sc_dev.dv_xname, i));
1958
1959 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1960
1961 status = sc->sc_rxdescs[i].wrx_status;
1962 errors = sc->sc_rxdescs[i].wrx_errors;
1963 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1964
1965 if ((status & WRX_ST_DD) == 0) {
1966 /*
1967 * We have processed all of the receive descriptors.
1968 */
1969 WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
1970 break;
1971 }
1972
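		/*
		 * A previous buffer-allocation failure put us in
		 * discard mode; drop fragments until we see EOP and
		 * can resynchronize on a packet boundary.
		 */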
1973 if (__predict_false(sc->sc_rxdiscard)) {
1974 DPRINTF(WM_DEBUG_RX,
1975 ("%s: RX: discarding contents of descriptor %d\n",
1976 sc->sc_dev.dv_xname, i));
1977 WM_INIT_RXDESC(sc, i);
1978 if (status & WRX_ST_EOP) {
1979 /* Reset our state. */
1980 DPRINTF(WM_DEBUG_RX,
1981 ("%s: RX: resetting rxdiscard -> 0\n",
1982 sc->sc_dev.dv_xname));
1983 sc->sc_rxdiscard = 0;
1984 }
1985 continue;
1986 }
1987
1988 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1989 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1990
1991 m = rxs->rxs_mbuf;
1992
1993 /*
1994 * Add a new receive buffer to the ring.
1995 */
1996 if (wm_add_rxbuf(sc, i) != 0) {
1997 /*
1998 * Failed, throw away what we've done so
1999 * far, and discard the rest of the packet.
2000 */
2001 ifp->if_ierrors++;
2002 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2003 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2004 WM_INIT_RXDESC(sc, i);
2005 if ((status & WRX_ST_EOP) == 0)
2006 sc->sc_rxdiscard = 1;
2007 if (sc->sc_rxhead != NULL)
2008 m_freem(sc->sc_rxhead);
2009 WM_RXCHAIN_RESET(sc);
2010 DPRINTF(WM_DEBUG_RX,
2011 ("%s: RX: Rx buffer allocation failed, "
2012 "dropping packet%s\n", sc->sc_dev.dv_xname,
2013 sc->sc_rxdiscard ? " (discard)" : ""));
2014 continue;
2015 }
2016
2017 WM_RXCHAIN_LINK(sc, m);
2018
2019 m->m_len = len;
2020
2021 DPRINTF(WM_DEBUG_RX,
2022 ("%s: RX: buffer at %p len %d\n",
2023 sc->sc_dev.dv_xname, m->m_data, len));
2024
2025 /*
2026 * If this is not the end of the packet, keep
2027 * looking.
2028 */
2029 if ((status & WRX_ST_EOP) == 0) {
2030 sc->sc_rxlen += len;
2031 DPRINTF(WM_DEBUG_RX,
2032 ("%s: RX: not yet EOP, rxlen -> %d\n",
2033 sc->sc_dev.dv_xname, sc->sc_rxlen));
2034 continue;
2035 }
2036
2037 /*
2038 * Okay, we have the entire packet now...
2039 */
2040 *sc->sc_rxtailp = NULL;
2041 m = sc->sc_rxhead;
2042 len += sc->sc_rxlen;
2043
2044 WM_RXCHAIN_RESET(sc);
2045
2046 DPRINTF(WM_DEBUG_RX,
2047 ("%s: RX: have entire packet, len -> %d\n",
2048 sc->sc_dev.dv_xname, len));
2049
2050 /*
2051 * If an error occurred, update stats and drop the packet.
2052 */
2053 if (errors &
2054 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
2055 ifp->if_ierrors++;
2056 if (errors & WRX_ER_SE)
2057 printf("%s: symbol error\n",
2058 sc->sc_dev.dv_xname);
2059 else if (errors & WRX_ER_SEQ)
2060 printf("%s: receive sequence error\n",
2061 sc->sc_dev.dv_xname);
2062 else if (errors & WRX_ER_CE)
2063 printf("%s: CRC error\n",
2064 sc->sc_dev.dv_xname);
2065 m_freem(m);
2066 continue;
2067 }
2068
2069 /*
2070 * No errors. Receive the packet.
2071 *
2072 * Note, we have configured the chip to include the
2073 * CRC with every packet.
2074 */
2075 m->m_flags |= M_HASFCS;
2076 m->m_pkthdr.rcvif = ifp;
2077 m->m_pkthdr.len = len;
2078
2079 #if 0 /* XXXJRT */
2080 /*
2081 * If VLANs are enabled, VLAN packets have been unwrapped
2082 * for us. Associate the tag with the packet.
2083 */
2084 if (sc->sc_ethercom.ec_nvlans != 0 &&
2085 (status & WRX_ST_VP) != 0) {
2086 struct m_tag *vtag;
2087
2088 vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
2089 M_NOWAIT);
2090 if (vtag == NULL) {
2091 ifp->if_ierrors++;
2092 printf("%s: unable to allocate VLAN tag\n",
2093 sc->sc_dev.dv_xname);
2094 m_freem(m);
2095 continue;
2096 }
2097
2098 *(u_int *)(vtag + 1) =
2099 le16toh(sc->sc_rxdescs[i].wrx_special);
2100 }
2101 #endif /* XXXJRT */
2102
2103 /*
2104 * Set up checksum info for this packet.
2105 */
2106 if (status & WRX_ST_IPCS) {
2107 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
2108 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
2109 if (errors & WRX_ER_IPE)
2110 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
2111 }
2112 if (status & WRX_ST_TCPCS) {
2113 /*
2114 * Note: we don't know if this was TCP or UDP,
2115 * so we just set both bits, and expect the
2116 * upper layers to deal.
2117 */
2118 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
2119 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
2120 if (errors & WRX_ER_TCPE)
2121 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
2122 }
2123
2124 ifp->if_ipackets++;
2125
2126 #if NBPFILTER > 0
2127 /* Pass this up to any BPF listeners. */
2128 if (ifp->if_bpf)
2129 bpf_mtap(ifp->if_bpf, m);
2130 #endif /* NBPFILTER > 0 */
2131
2132 /* Pass it on. */
2133 (*ifp->if_input)(ifp, m);
2134 }
2135
2136 /* Update the receive pointer. */
2137 sc->sc_rxptr = i;
2138
2139 DPRINTF(WM_DEBUG_RX,
2140 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
2141 }
2142
2143 /*
2144 * wm_linkintr:
2145 *
2146 * Helper; handle link interrupts.
2147 */
2148 static void
2149 wm_linkintr(struct wm_softc *sc, uint32_t icr)
2150 {
2151 uint32_t status;
2152
2153 /*
2154 * If we get a link status interrupt on a 1000BASE-T
2155 * device, just fall into the normal MII tick path.
2156 */
2157 if (sc->sc_flags & WM_F_HAS_MII) {
2158 if (icr & ICR_LSC) {
2159 DPRINTF(WM_DEBUG_LINK,
2160 ("%s: LINK: LSC -> mii_tick\n",
2161 sc->sc_dev.dv_xname));
2162 mii_tick(&sc->sc_mii);
2163 } else if (icr & ICR_RXSEQ) {
2164 DPRINTF(WM_DEBUG_LINK,
2165 			    ("%s: LINK: Receive sequence error\n",
2166 sc->sc_dev.dv_xname));
2167 }
2168 return;
2169 }
2170
2171 /*
2172 * If we are now receiving /C/, check for link again in
2173 * a couple of link clock ticks.
2174 */
2175 if (icr & ICR_RXCFG) {
2176 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
2177 sc->sc_dev.dv_xname));
2178 sc->sc_tbi_anstate = 2;
2179 }
2180
2181 if (icr & ICR_LSC) {
2182 status = CSR_READ(sc, WMREG_STATUS);
2183 if (status & STATUS_LU) {
2184 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
2185 sc->sc_dev.dv_xname,
2186 (status & STATUS_FD) ? "FDX" : "HDX"));
2187 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2188 sc->sc_fcrtl &= ~FCRTL_XONE;
2189 if (status & STATUS_FD)
2190 sc->sc_tctl |=
2191 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2192 else
2193 sc->sc_tctl |=
2194 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2195 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
2196 sc->sc_fcrtl |= FCRTL_XONE;
2197 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2198 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
2199 WMREG_OLD_FCRTL : WMREG_FCRTL,
2200 sc->sc_fcrtl);
2201 sc->sc_tbi_linkup = 1;
2202 } else {
2203 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
2204 sc->sc_dev.dv_xname));
2205 sc->sc_tbi_linkup = 0;
2206 }
2207 sc->sc_tbi_anstate = 2;
2208 wm_tbi_set_linkled(sc);
2209 } else if (icr & ICR_RXSEQ) {
2210 DPRINTF(WM_DEBUG_LINK,
2211 ("%s: LINK: Receive sequence error\n",
2212 sc->sc_dev.dv_xname));
2213 }
2214 }
2215
2216 /*
2217 * wm_tick:
2218 *
2219  * One second timer, used to check link status, gather
2220  * flow-control statistics, etc.
2221 */
2222 static void
2223 wm_tick(void *arg)
2224 {
2225 struct wm_softc *sc = arg;
2226 int s;
2227
2228 s = splnet();
2229
2230 if (sc->sc_type >= WM_T_82542_2_1) {
2231 WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2232 WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2233 WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2234 WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2235 WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2236 }
2237
2238 if (sc->sc_flags & WM_F_HAS_MII)
2239 mii_tick(&sc->sc_mii);
2240 else
2241 wm_tbi_check_link(sc);
2242
2243 splx(s);
2244
2245 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2246 }
2247
2248 /*
2249 * wm_reset:
2250 *
2251  * Reset the chip.
2252 */
2253 static void
2254 wm_reset(struct wm_softc *sc)
2255 {
2256 int i;
2257
2258 switch (sc->sc_type) {
2259 case WM_T_82544:
2260 case WM_T_82540:
2261 case WM_T_82545:
2262 case WM_T_82546:
2263 case WM_T_82541:
2264 case WM_T_82541_2:
2265 /*
2266 * These chips have a problem with the memory-mapped
2267 * write cycle when issuing the reset, so use I/O-mapped
2268 * access, if possible.
2269 */
2270 if (sc->sc_flags & WM_F_IOH_VALID)
2271 wm_io_write(sc, WMREG_CTRL, CTRL_RST);
2272 else
2273 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2274 break;
2275
2276 case WM_T_82545_3:
2277 case WM_T_82546_3:
2278 /* Use the shadow control register on these chips. */
2279 CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
2280 break;
2281
2282 default:
2283 /* Everything else can safely use the documented method. */
2284 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2285 break;
2286 }
2287 delay(10000);
2288
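	/*
	 * Wait for the chip to deassert CTRL_RST, which indicates
	 * that its internal reset has completed.
	 */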
2289 for (i = 0; i < 1000; i++) {
2290 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2291 return;
2292 delay(20);
2293 }
2294
2295 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2296 printf("%s: WARNING: reset failed to complete\n",
2297 sc->sc_dev.dv_xname);
2298 }
2299
2300 /*
2301 * wm_init: [ifnet interface function]
2302 *
2303 * Initialize the interface. Must be called at splnet().
2304 */
2305 static int
2306 wm_init(struct ifnet *ifp)
2307 {
2308 struct wm_softc *sc = ifp->if_softc;
2309 struct wm_rxsoft *rxs;
2310 int i, error = 0;
2311 uint32_t reg;
2312
2313 /*
2314 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
2315 	 * There is a small but measurable benefit to avoiding the adjustment
2316 	 * of the descriptor so that the headers are aligned, for a normal MTU,
2317 * on such platforms. One possibility is that the DMA itself is
2318 * slightly more efficient if the front of the entire packet (instead
2319 * of the front of the headers) is aligned.
2320 *
2321 * Note we must always set align_tweak to 0 if we are using
2322 * jumbo frames.
2323 */
2324 #ifdef __NO_STRICT_ALIGNMENT
2325 sc->sc_align_tweak = 0;
2326 #else
2327 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2328 sc->sc_align_tweak = 0;
2329 else
2330 sc->sc_align_tweak = 2;
2331 #endif /* __NO_STRICT_ALIGNMENT */
2332
2333 /* Cancel any pending I/O. */
2334 wm_stop(ifp, 0);
2335
2336 /* Reset the chip to a known state. */
2337 wm_reset(sc);
2338
2339 /* Initialize the transmit descriptor ring. */
2340 memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
2341 WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
2342 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2343 sc->sc_txfree = WM_NTXDESC(sc);
2344 sc->sc_txnext = 0;
2345
2346 sc->sc_txctx_ipcs = 0xffffffff;
2347 sc->sc_txctx_tucs = 0xffffffff;
2348
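	/*
	 * Program the Tx descriptor ring base, length, and head/tail
	 * pointers, and set a transmit interrupt delay (TIDV) so that
	 * Tx completion interrupts are batched rather than taken
	 * per-packet.
	 */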
2349 if (sc->sc_type < WM_T_82543) {
2350 CSR_WRITE(sc, WMREG_OLD_TBDAH, WM_CDTXADDR_HI(sc, 0));
2351 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR_LO(sc, 0));
2352 CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
2353 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2354 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2355 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2356 } else {
2357 CSR_WRITE(sc, WMREG_TBDAH, WM_CDTXADDR_HI(sc, 0));
2358 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR_LO(sc, 0));
2359 CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
2360 CSR_WRITE(sc, WMREG_TDH, 0);
2361 CSR_WRITE(sc, WMREG_TDT, 0);
2362 CSR_WRITE(sc, WMREG_TIDV, 128);
2363
2364 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2365 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2366 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2367 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2368 }
2369 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2370 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2371
2372 /* Initialize the transmit job descriptors. */
2373 for (i = 0; i < WM_TXQUEUELEN(sc); i++)
2374 sc->sc_txsoft[i].txs_mbuf = NULL;
2375 sc->sc_txsfree = WM_TXQUEUELEN(sc);
2376 sc->sc_txsnext = 0;
2377 sc->sc_txsdirty = 0;
2378
2379 /*
2380 * Initialize the receive descriptor and receive job
2381 * descriptor rings.
2382 */
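	/*
	 * As on the Tx side, a receive interrupt delay (RDTR) is
	 * programmed below so that Rx interrupts are batched; the
	 * RDTR_FPD bit requests a flush of any partially filled
	 * descriptor block when the timer expires.
	 */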
2383 if (sc->sc_type < WM_T_82543) {
2384 CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
2385 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
2386 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2387 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2388 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2389 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2390
2391 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2392 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2393 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2394 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2395 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2396 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2397 } else {
2398 CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
2399 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
2400 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2401 CSR_WRITE(sc, WMREG_RDH, 0);
2402 CSR_WRITE(sc, WMREG_RDT, 0);
2403 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2404 }
2405 for (i = 0; i < WM_NRXDESC; i++) {
2406 rxs = &sc->sc_rxsoft[i];
2407 if (rxs->rxs_mbuf == NULL) {
2408 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2409 printf("%s: unable to allocate or map rx "
2410 "buffer %d, error = %d\n",
2411 sc->sc_dev.dv_xname, i, error);
2412 /*
2413 * XXX Should attempt to run with fewer receive
2414 * XXX buffers instead of just failing.
2415 */
2416 wm_rxdrain(sc);
2417 goto out;
2418 }
2419 } else
2420 WM_INIT_RXDESC(sc, i);
2421 }
2422 sc->sc_rxptr = 0;
2423 sc->sc_rxdiscard = 0;
2424 WM_RXCHAIN_RESET(sc);
2425
2426 /*
2427 * Clear out the VLAN table -- we don't use it (yet).
2428 */
2429 CSR_WRITE(sc, WMREG_VET, 0);
2430 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2431 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2432
2433 /*
2434 * Set up flow-control parameters.
2435 *
2436 * XXX Values could probably stand some tuning.
2437 */
2438 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2439 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2440 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2441
2442 sc->sc_fcrtl = FCRTL_DFLT;
2443 if (sc->sc_type < WM_T_82543) {
2444 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2445 CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
2446 } else {
2447 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2448 CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
2449 }
2450 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2451
2452 #if 0 /* XXXJRT */
2453 /* Deal with VLAN enables. */
2454 if (sc->sc_ethercom.ec_nvlans != 0)
2455 sc->sc_ctrl |= CTRL_VME;
2456 else
2457 #endif /* XXXJRT */
2458 sc->sc_ctrl &= ~CTRL_VME;
2459
2460 /* Write the control registers. */
2461 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2462 #if 0
2463 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2464 #endif
2465
2466 /*
2467 * Set up checksum offload parameters.
2468 */
2469 reg = CSR_READ(sc, WMREG_RXCSUM);
2470 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2471 reg |= RXCSUM_IPOFL;
2472 else
2473 reg &= ~RXCSUM_IPOFL;
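	/*
	 * The chip's TCP/UDP offload depends on its IP offload
	 * engine, so RXCSUM_IPOFL is kept set whenever TUOFL is
	 * enabled, even if IPv4 checksums were not requested.
	 */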
2474 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2475 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2476 else {
2477 reg &= ~RXCSUM_TUOFL;
2478 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2479 reg &= ~RXCSUM_IPOFL;
2480 }
2481 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2482
2483 /*
2484 * Set up the interrupt registers.
2485 */
2486 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2487 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2488 ICR_RXO | ICR_RXT0;
2489 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2490 sc->sc_icr |= ICR_RXCFG;
2491 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2492
2493 /* Set up the inter-packet gap. */
2494 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2495
2496 #if 0 /* XXXJRT */
2497 /* Set the VLAN ethernetype. */
2498 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2499 #endif
2500
2501 /*
2502 * Set up the transmit control register; we start out with
2503 	 * a collision distance suitable for FDX, but update it when
2504 * we resolve the media type.
2505 */
2506 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2507 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2508 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2509
2510 /* Set the media. */
2511 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2512
2513 /*
2514 * Set up the receive control register; we actually program
2515 * the register when we set the receive filter. Use multicast
2516 * address offset type 0.
2517 *
2518 * Only the i82544 has the ability to strip the incoming
2519 * CRC, so we don't enable that feature.
2520 */
2521 sc->sc_mchash_type = 0;
2522 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2523 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2524
2525 	if (MCLBYTES == 2048) {
2526 sc->sc_rctl |= RCTL_2k;
2527 } else {
2528 		if (sc->sc_type >= WM_T_82543) {
2529 			switch (MCLBYTES) {
2530 case 4096:
2531 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2532 break;
2533 case 8192:
2534 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2535 break;
2536 case 16384:
2537 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2538 break;
2539 default:
2540 panic("wm_init: MCLBYTES %d unsupported",
2541 MCLBYTES);
2542 break;
2543 }
2544 		} else
			panic("wm_init: i82542 requires MCLBYTES = 2048");
2545 }
2546
2547 /* Set the receive filter. */
2548 wm_set_filter(sc);
2549
2550 /* Start the one second link check clock. */
2551 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2552
2553 /* ...all done! */
2554 ifp->if_flags |= IFF_RUNNING;
2555 ifp->if_flags &= ~IFF_OACTIVE;
2556
2557 out:
2558 if (error)
2559 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2560 return (error);
2561 }
2562
2563 /*
2564 * wm_rxdrain:
2565 *
2566 * Drain the receive queue.
2567 */
2568 static void
2569 wm_rxdrain(struct wm_softc *sc)
2570 {
2571 struct wm_rxsoft *rxs;
2572 int i;
2573
2574 for (i = 0; i < WM_NRXDESC; i++) {
2575 rxs = &sc->sc_rxsoft[i];
2576 if (rxs->rxs_mbuf != NULL) {
2577 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2578 m_freem(rxs->rxs_mbuf);
2579 rxs->rxs_mbuf = NULL;
2580 }
2581 }
2582 }
2583
2584 /*
2585 * wm_stop: [ifnet interface function]
2586 *
2587 * Stop transmission on the interface.
2588 */
2589 static void
2590 wm_stop(struct ifnet *ifp, int disable)
2591 {
2592 struct wm_softc *sc = ifp->if_softc;
2593 struct wm_txsoft *txs;
2594 int i;
2595
2596 /* Stop the one second clock. */
2597 callout_stop(&sc->sc_tick_ch);
2598
2599 if (sc->sc_flags & WM_F_HAS_MII) {
2600 /* Down the MII. */
2601 mii_down(&sc->sc_mii);
2602 }
2603
2604 /* Stop the transmit and receive processes. */
2605 CSR_WRITE(sc, WMREG_TCTL, 0);
2606 CSR_WRITE(sc, WMREG_RCTL, 0);
2607
2608 /* Release any queued transmit buffers. */
2609 for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2610 txs = &sc->sc_txsoft[i];
2611 if (txs->txs_mbuf != NULL) {
2612 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2613 m_freem(txs->txs_mbuf);
2614 txs->txs_mbuf = NULL;
2615 }
2616 }
2617
2618 if (disable)
2619 wm_rxdrain(sc);
2620
2621 /* Mark the interface as down and cancel the watchdog timer. */
2622 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2623 ifp->if_timer = 0;
2624 }
2625
2626 /*
2627 * wm_acquire_eeprom:
2628 *
2629 * Perform the EEPROM handshake required on some chips.
2630 */
2631 static int
2632 wm_acquire_eeprom(struct wm_softc *sc)
2633 {
2634 uint32_t reg;
2635 int x;
2636
2637 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2638 reg = CSR_READ(sc, WMREG_EECD);
2639
2640 /* Request EEPROM access. */
2641 reg |= EECD_EE_REQ;
2642 CSR_WRITE(sc, WMREG_EECD, reg);
2643
2644 		/* ...and wait for it to be granted. */
2645 for (x = 0; x < 100; x++) {
2646 reg = CSR_READ(sc, WMREG_EECD);
2647 if (reg & EECD_EE_GNT)
2648 break;
2649 delay(5);
2650 }
2651 if ((reg & EECD_EE_GNT) == 0) {
2652 aprint_error("%s: could not acquire EEPROM GNT\n",
2653 sc->sc_dev.dv_xname);
2654 reg &= ~EECD_EE_REQ;
2655 CSR_WRITE(sc, WMREG_EECD, reg);
2656 return (1);
2657 }
2658 }
2659
2660 return (0);
2661 }
2662
2663 /*
2664 * wm_release_eeprom:
2665 *
2666 * Release the EEPROM mutex.
2667 */
2668 static void
2669 wm_release_eeprom(struct wm_softc *sc)
2670 {
2671 uint32_t reg;
2672
2673 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2674 reg = CSR_READ(sc, WMREG_EECD);
2675 reg &= ~EECD_EE_REQ;
2676 CSR_WRITE(sc, WMREG_EECD, reg);
2677 }
2678 }
2679
2680 /*
2681 * wm_eeprom_sendbits:
2682 *
2683 * Send a series of bits to the EEPROM.
2684 */
2685 static void
2686 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
2687 {
2688 uint32_t reg;
2689 int x;
2690
2691 reg = CSR_READ(sc, WMREG_EECD);
2692
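	/*
	 * Clock each bit out MSB first: present the bit on DI, then
	 * pulse SK high and low around it.
	 */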
2693 for (x = nbits; x > 0; x--) {
2694 if (bits & (1U << (x - 1)))
2695 reg |= EECD_DI;
2696 else
2697 reg &= ~EECD_DI;
2698 CSR_WRITE(sc, WMREG_EECD, reg);
2699 delay(2);
2700 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2701 delay(2);
2702 CSR_WRITE(sc, WMREG_EECD, reg);
2703 delay(2);
2704 }
2705 }
2706
2707 /*
2708 * wm_eeprom_recvbits:
2709 *
2710 * Receive a series of bits from the EEPROM.
2711 */
2712 static void
2713 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
2714 {
2715 uint32_t reg, val;
2716 int x;
2717
2718 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
2719
2720 val = 0;
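	/*
	 * Clock each bit in MSB first: raise SK, sample DO, then
	 * lower SK again.
	 */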
2721 for (x = nbits; x > 0; x--) {
2722 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2723 delay(2);
2724 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2725 val |= (1U << (x - 1));
2726 CSR_WRITE(sc, WMREG_EECD, reg);
2727 delay(2);
2728 }
2729 *valp = val;
2730 }
2731
2732 /*
2733 * wm_read_eeprom_uwire:
2734 *
2735 * Read a word from the EEPROM using the MicroWire protocol.
2736 */
2737 static int
2738 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2739 {
2740 uint32_t reg, val;
2741 int i;
2742
2743 for (i = 0; i < wordcnt; i++) {
2744 /* Clear SK and DI. */
2745 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
2746 CSR_WRITE(sc, WMREG_EECD, reg);
2747
2748 /* Set CHIP SELECT. */
2749 reg |= EECD_CS;
2750 CSR_WRITE(sc, WMREG_EECD, reg);
2751 delay(2);
2752
2753 /* Shift in the READ command. */
2754 wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
2755
2756 /* Shift in address. */
2757 wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
2758
2759 /* Shift out the data. */
2760 wm_eeprom_recvbits(sc, &val, 16);
2761 data[i] = val & 0xffff;
2762
2763 /* Clear CHIP SELECT. */
2764 reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
2765 CSR_WRITE(sc, WMREG_EECD, reg);
2766 delay(2);
2767 }
2768
2769 return (0);
2770 }
2771
2772 /*
2773 * wm_spi_eeprom_ready:
2774 *
2775 * Wait for a SPI EEPROM to be ready for commands.
2776 */
2777 static int
2778 wm_spi_eeprom_ready(struct wm_softc *sc)
2779 {
2780 uint32_t val;
2781 int usec;
2782
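	/*
	 * Poll the EEPROM's status register until the busy bit
	 * deasserts, giving up after SPI_MAX_RETRIES microseconds.
	 */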
2783 for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
2784 wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
2785 wm_eeprom_recvbits(sc, &val, 8);
2786 if ((val & SPI_SR_RDY) == 0)
2787 break;
2788 }
2789 if (usec >= SPI_MAX_RETRIES) {
2790 aprint_error("%s: EEPROM failed to become ready\n",
2791 sc->sc_dev.dv_xname);
2792 return (1);
2793 }
2794 return (0);
2795 }
2796
2797 /*
2798 * wm_read_eeprom_spi:
2799 *
2800  * Read a word from the EEPROM using the SPI protocol.
2801 */
2802 static int
2803 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2804 {
2805 uint32_t reg, val;
2806 int i;
2807 uint8_t opc;
2808
2809 /* Clear SK and CS. */
2810 reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
2811 CSR_WRITE(sc, WMREG_EECD, reg);
2812 delay(2);
2813
2814 if (wm_spi_eeprom_ready(sc))
2815 return (1);
2816
2817 /* Toggle CS to flush commands. */
2818 CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
2819 delay(2);
2820 CSR_WRITE(sc, WMREG_EECD, reg);
2821 delay(2);
2822
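	/*
	 * The device is byte addressed, so the word index is doubled
	 * below; small parts with 8 address bits instead carry
	 * address bit 8 in the opcode.
	 */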
2823 opc = SPI_OPC_READ;
2824 if (sc->sc_ee_addrbits == 8 && word >= 128)
2825 opc |= SPI_OPC_A8;
2826
2827 wm_eeprom_sendbits(sc, opc, 8);
2828 wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
2829
2830 for (i = 0; i < wordcnt; i++) {
2831 wm_eeprom_recvbits(sc, &val, 16);
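		/*
		 * Bits arrive MSB first with the low byte of each
		 * word shifted out ahead of the high byte, so swap
		 * the two received bytes into host word order.
		 */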
2832 data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
2833 }
2834
2835 /* Raise CS and clear SK. */
2836 reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
2837 CSR_WRITE(sc, WMREG_EECD, reg);
2838 delay(2);
2839
2840 return (0);
2841 }
2842
2843 /*
2844 * wm_read_eeprom:
2845 *
2846 * Read data from the serial EEPROM.
2847 */
2848 static int
2849 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2850 {
2851 int rv;
2852
2853 if (wm_acquire_eeprom(sc))
2854 return (1);
2855
2856 if (sc->sc_flags & WM_F_EEPROM_SPI)
2857 rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
2858 else
2859 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2860
2861 wm_release_eeprom(sc);
2862 return (rv);
2863 }
2864
2865 /*
2866 * wm_add_rxbuf:
2867 *
2868  * Add a receive buffer to the indicated descriptor.
2869 */
2870 static int
2871 wm_add_rxbuf(struct wm_softc *sc, int idx)
2872 {
2873 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2874 struct mbuf *m;
2875 int error;
2876
2877 MGETHDR(m, M_DONTWAIT, MT_DATA);
2878 if (m == NULL)
2879 return (ENOBUFS);
2880
2881 MCLGET(m, M_DONTWAIT);
2882 if ((m->m_flags & M_EXT) == 0) {
2883 m_freem(m);
2884 return (ENOBUFS);
2885 }
2886
2887 if (rxs->rxs_mbuf != NULL)
2888 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2889
2890 rxs->rxs_mbuf = m;
2891
2892 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2893 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2894 BUS_DMA_READ|BUS_DMA_NOWAIT);
2895 if (error) {
2896 printf("%s: unable to load rx DMA map %d, error = %d\n",
2897 sc->sc_dev.dv_xname, idx, error);
2898 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2899 }
2900
2901 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2902 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2903
2904 WM_INIT_RXDESC(sc, idx);
2905
2906 return (0);
2907 }
2908
2909 /*
2910 * wm_set_ral:
2911 *
2912  * Set an entry in the receive address list.
2913 */
2914 static void
2915 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2916 {
2917 uint32_t ral_lo, ral_hi;
2918
2919 if (enaddr != NULL) {
2920 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2921 (enaddr[3] << 24);
2922 ral_hi = enaddr[4] | (enaddr[5] << 8);
2923 ral_hi |= RAL_AV;
2924 } else {
2925 ral_lo = 0;
2926 ral_hi = 0;
2927 }
2928
2929 if (sc->sc_type >= WM_T_82544) {
2930 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2931 ral_lo);
2932 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2933 ral_hi);
2934 } else {
2935 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2936 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2937 }
2938 }
2939
2940 /*
2941 * wm_mchash:
2942 *
2943 * Compute the hash of the multicast address for the 4096-bit
2944 * multicast filter.
2945 */
2946 static uint32_t
2947 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2948 {
2949 static const int lo_shift[4] = { 4, 3, 2, 0 };
2950 static const int hi_shift[4] = { 4, 5, 6, 8 };
2951 uint32_t hash;
2952
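	/*
	 * The four mchash types correspond to the multicast offset
	 * (MO) settings programmed into RCTL; each selects a
	 * different 12-bit window of the last two bytes of the
	 * destination address.
	 */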
2953 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2954 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2955
2956 return (hash & 0xfff);
2957 }
2958
2959 /*
2960 * wm_set_filter:
2961 *
2962 * Set up the receive filter.
2963 */
2964 static void
2965 wm_set_filter(struct wm_softc *sc)
2966 {
2967 struct ethercom *ec = &sc->sc_ethercom;
2968 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2969 struct ether_multi *enm;
2970 struct ether_multistep step;
2971 bus_addr_t mta_reg;
2972 uint32_t hash, reg, bit;
2973 int i;
2974
2975 if (sc->sc_type >= WM_T_82544)
2976 mta_reg = WMREG_CORDOVA_MTA;
2977 else
2978 mta_reg = WMREG_MTA;
2979
2980 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2981
2982 if (ifp->if_flags & IFF_BROADCAST)
2983 sc->sc_rctl |= RCTL_BAM;
2984 if (ifp->if_flags & IFF_PROMISC) {
2985 sc->sc_rctl |= RCTL_UPE;
2986 goto allmulti;
2987 }
2988
2989 /*
2990 * Set the station address in the first RAL slot, and
2991 * clear the remaining slots.
2992 */
2993 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2994 for (i = 1; i < WM_RAL_TABSIZE; i++)
2995 wm_set_ral(sc, NULL, i);
2996
2997 /* Clear out the multicast table. */
2998 for (i = 0; i < WM_MC_TABSIZE; i++)
2999 CSR_WRITE(sc, mta_reg + (i << 2), 0);
3000
3001 ETHER_FIRST_MULTI(step, ec, enm);
3002 while (enm != NULL) {
3003 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3004 /*
3005 * We must listen to a range of multicast addresses.
3006 * For now, just accept all multicasts, rather than
3007 * trying to set only those filter bits needed to match
3008 * the range. (At this time, the only use of address
3009 * ranges is for IP multicast routing, for which the
3010 * range is big enough to require all bits set.)
3011 */
3012 goto allmulti;
3013 }
3014
3015 hash = wm_mchash(sc, enm->enm_addrlo);
3016
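		/*
		 * The 4096-bit filter is an array of 128 32-bit
		 * registers: the upper 7 bits of the hash select the
		 * register, the low 5 bits the bit within it.
		 */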
3017 reg = (hash >> 5) & 0x7f;
3018 bit = hash & 0x1f;
3019
3020 hash = CSR_READ(sc, mta_reg + (reg << 2));
3021 hash |= 1U << bit;
3022
3023 /* XXX Hardware bug?? */
3024 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
3025 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3026 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3027 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3028 } else
3029 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3030
3031 ETHER_NEXT_MULTI(step, enm);
3032 }
3033
3034 ifp->if_flags &= ~IFF_ALLMULTI;
3035 goto setit;
3036
3037 allmulti:
3038 ifp->if_flags |= IFF_ALLMULTI;
3039 sc->sc_rctl |= RCTL_MPE;
3040
3041 setit:
3042 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3043 }
3044
3045 /*
3046 * wm_tbi_mediainit:
3047 *
3048 * Initialize media for use on 1000BASE-X devices.
3049 */
3050 static void
3051 wm_tbi_mediainit(struct wm_softc *sc)
3052 {
3053 const char *sep = "";
3054
3055 if (sc->sc_type < WM_T_82543)
3056 sc->sc_tipg = TIPG_WM_DFLT;
3057 else
3058 sc->sc_tipg = TIPG_LG_DFLT;
3059
3060 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
3061 wm_tbi_mediastatus);
3062
3063 /*
3064 * SWD Pins:
3065 *
3066 * 0 = Link LED (output)
3067 * 1 = Loss Of Signal (input)
3068 */
3069 sc->sc_ctrl |= CTRL_SWDPIO(0);
3070 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
3071
3072 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3073
3074 #define ADD(ss, mm, dd) \
3075 do { \
3076 printf("%s%s", sep, ss); \
3077 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
3078 sep = ", "; \
3079 } while (/*CONSTCOND*/0)
3080
3081 printf("%s: ", sc->sc_dev.dv_xname);
3082 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
3083 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
3084 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
3085 printf("\n");
3086
3087 #undef ADD
3088
3089 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3090 }
3091
3092 /*
3093 * wm_tbi_mediastatus: [ifmedia interface function]
3094 *
3095 * Get the current interface media status on a 1000BASE-X device.
3096 */
3097 static void
3098 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3099 {
3100 struct wm_softc *sc = ifp->if_softc;
3101 uint32_t ctrl;
3102
3103 ifmr->ifm_status = IFM_AVALID;
3104 ifmr->ifm_active = IFM_ETHER;
3105
3106 if (sc->sc_tbi_linkup == 0) {
3107 ifmr->ifm_active |= IFM_NONE;
3108 return;
3109 }
3110
3111 ifmr->ifm_status |= IFM_ACTIVE;
3112 ifmr->ifm_active |= IFM_1000_SX;
3113 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
3114 ifmr->ifm_active |= IFM_FDX;
3115 ctrl = CSR_READ(sc, WMREG_CTRL);
3116 if (ctrl & CTRL_RFCE)
3117 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
3118 if (ctrl & CTRL_TFCE)
3119 ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
3120 }
3121
3122 /*
3123 * wm_tbi_mediachange: [ifmedia interface function]
3124 *
3125 * Set hardware to newly-selected media on a 1000BASE-X device.
3126 */
3127 static int
3128 wm_tbi_mediachange(struct ifnet *ifp)
3129 {
3130 struct wm_softc *sc = ifp->if_softc;
3131 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
3132 uint32_t status;
3133 int i;
3134
3135 sc->sc_txcw = ife->ifm_data;
3136 if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
3137 (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
3138 sc->sc_txcw |= ANAR_X_PAUSE_SYM | ANAR_X_PAUSE_ASYM;
3139 sc->sc_txcw |= TXCW_ANE;
3140
3141 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
3142 delay(10000);
3143
3144 /* NOTE: CTRL will update TFCE and RFCE automatically. */
3145
3146 sc->sc_tbi_anstate = 0;
3147
3148 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
3149 /* Have signal; wait for the link to come up. */
3150 for (i = 0; i < 50; i++) {
3151 delay(10000);
3152 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
3153 break;
3154 }
3155
3156 status = CSR_READ(sc, WMREG_STATUS);
3157 if (status & STATUS_LU) {
3158 /* Link is up. */
3159 DPRINTF(WM_DEBUG_LINK,
3160 ("%s: LINK: set media -> link up %s\n",
3161 sc->sc_dev.dv_xname,
3162 (status & STATUS_FD) ? "FDX" : "HDX"));
3163 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3164 sc->sc_fcrtl &= ~FCRTL_XONE;
3165 if (status & STATUS_FD)
3166 sc->sc_tctl |=
3167 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3168 else
3169 sc->sc_tctl |=
3170 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3171 if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
3172 sc->sc_fcrtl |= FCRTL_XONE;
3173 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3174 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3175 WMREG_OLD_FCRTL : WMREG_FCRTL,
3176 sc->sc_fcrtl);
3177 sc->sc_tbi_linkup = 1;
3178 } else {
3179 /* Link is down. */
3180 DPRINTF(WM_DEBUG_LINK,
3181 ("%s: LINK: set media -> link down\n",
3182 sc->sc_dev.dv_xname));
3183 sc->sc_tbi_linkup = 0;
3184 }
3185 } else {
3186 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
3187 sc->sc_dev.dv_xname));
3188 sc->sc_tbi_linkup = 0;
3189 }
3190
3191 wm_tbi_set_linkled(sc);
3192
3193 return (0);
3194 }
3195
3196 /*
3197 * wm_tbi_set_linkled:
3198 *
3199 * Update the link LED on 1000BASE-X devices.
3200 */
3201 static void
3202 wm_tbi_set_linkled(struct wm_softc *sc)
3203 {
3204
3205 if (sc->sc_tbi_linkup)
3206 sc->sc_ctrl |= CTRL_SWDPIN(0);
3207 else
3208 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
3209
3210 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3211 }
3212
3213 /*
3214 * wm_tbi_check_link:
3215 *
3216 * Check the link on 1000BASE-X devices.
3217 */
3218 static void
3219 wm_tbi_check_link(struct wm_softc *sc)
3220 {
3221 uint32_t rxcw, ctrl, status;
3222
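	/*
	 * sc_tbi_anstate is armed by link interrupts; count it down
	 * one tick at a time to give autonegotiation a chance to
	 * settle, and only examine the link state on the final tick.
	 */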
3223 if (sc->sc_tbi_anstate == 0)
3224 return;
3225 else if (sc->sc_tbi_anstate > 1) {
3226 DPRINTF(WM_DEBUG_LINK,
3227 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
3228 sc->sc_tbi_anstate));
3229 sc->sc_tbi_anstate--;
3230 return;
3231 }
3232
3233 sc->sc_tbi_anstate = 0;
3234
3235 rxcw = CSR_READ(sc, WMREG_RXCW);
3236 ctrl = CSR_READ(sc, WMREG_CTRL);
3237 status = CSR_READ(sc, WMREG_STATUS);
3238
3239 if ((status & STATUS_LU) == 0) {
3240 DPRINTF(WM_DEBUG_LINK,
3241 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
3242 sc->sc_tbi_linkup = 0;
3243 } else {
3244 DPRINTF(WM_DEBUG_LINK,
3245 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
3246 (status & STATUS_FD) ? "FDX" : "HDX"));
3247 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3248 sc->sc_fcrtl &= ~FCRTL_XONE;
3249 if (status & STATUS_FD)
3250 sc->sc_tctl |=
3251 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3252 else
3253 sc->sc_tctl |=
3254 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3255 if (ctrl & CTRL_TFCE)
3256 sc->sc_fcrtl |= FCRTL_XONE;
3257 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3258 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3259 WMREG_OLD_FCRTL : WMREG_FCRTL,
3260 sc->sc_fcrtl);
3261 sc->sc_tbi_linkup = 1;
3262 }
3263
3264 wm_tbi_set_linkled(sc);
3265 }
3266
3267 /*
3268 * wm_gmii_reset:
3269 *
3270 * Reset the PHY.
3271 */
3272 static void
3273 wm_gmii_reset(struct wm_softc *sc)
3274 {
3275 uint32_t reg;
3276
3277 if (sc->sc_type >= WM_T_82544) {
3278 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
3279 delay(20000);
3280
3281 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3282 delay(20000);
3283 } else {
3284 /* The PHY reset pin is active-low. */
3285 reg = CSR_READ(sc, WMREG_CTRL_EXT);
3286 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
3287 CTRL_EXT_SWDPIN(4));
3288 reg |= CTRL_EXT_SWDPIO(4);
3289
3290 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3291 delay(10);
3292
3293 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3294 delay(10);
3295
3296 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
3297 delay(10);
3298 #if 0
3299 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
3300 #endif
3301 }
3302 }
3303
3304 /*
3305 * wm_gmii_mediainit:
3306 *
3307 * Initialize media for use on 1000BASE-T devices.
3308 */
3309 static void
3310 wm_gmii_mediainit(struct wm_softc *sc)
3311 {
3312 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3313
3314 /* We have MII. */
3315 sc->sc_flags |= WM_F_HAS_MII;
3316
3317 sc->sc_tipg = TIPG_1000T_DFLT;
3318
3319 /*
3320 * Let the chip set speed/duplex on its own based on
3321 * signals from the PHY.
3322 */
3323 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
3324 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3325
3326 /* Initialize our media structures and probe the GMII. */
3327 sc->sc_mii.mii_ifp = ifp;
3328
3329 if (sc->sc_type >= WM_T_82544) {
3330 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
3331 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
3332 } else {
3333 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
3334 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
3335 }
3336 sc->sc_mii.mii_statchg = wm_gmii_statchg;
3337
3338 wm_gmii_reset(sc);
3339
3340 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
3341 wm_gmii_mediastatus);
3342
3343 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
3344 MII_OFFSET_ANY, MIIF_DOPAUSE);
3345 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
3346 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
3347 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
3348 } else
3349 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
3350 }
3351
3352 /*
3353 * wm_gmii_mediastatus: [ifmedia interface function]
3354 *
3355 * Get the current interface media status on a 1000BASE-T device.
3356 */
3357 static void
3358 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
3359 {
3360 struct wm_softc *sc = ifp->if_softc;
3361
3362 mii_pollstat(&sc->sc_mii);
3363 ifmr->ifm_status = sc->sc_mii.mii_media_status;
3364 ifmr->ifm_active = (sc->sc_mii.mii_media_active & ~IFM_ETH_FMASK) |
3365 sc->sc_flowflags;
3366 }
3367
3368 /*
3369 * wm_gmii_mediachange: [ifmedia interface function]
3370 *
3371 * Set hardware to newly-selected media on a 1000BASE-T device.
3372 */
3373 static int
3374 wm_gmii_mediachange(struct ifnet *ifp)
3375 {
3376 struct wm_softc *sc = ifp->if_softc;
3377
3378 if (ifp->if_flags & IFF_UP)
3379 mii_mediachg(&sc->sc_mii);
3380 return (0);
3381 }
3382
3383 #define MDI_IO CTRL_SWDPIN(2)
3384 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3385 #define MDI_CLK CTRL_SWDPIN(3)
3386
3387 static void
3388 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3389 {
3390 uint32_t i, v;
3391
3392 v = CSR_READ(sc, WMREG_CTRL);
3393 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3394 v |= MDI_DIR | CTRL_SWDPIO(3);
3395
3396 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3397 if (data & i)
3398 v |= MDI_IO;
3399 else
3400 v &= ~MDI_IO;
3401 CSR_WRITE(sc, WMREG_CTRL, v);
3402 delay(10);
3403 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3404 delay(10);
3405 CSR_WRITE(sc, WMREG_CTRL, v);
3406 delay(10);
3407 }
3408 }
3409
3410 static uint32_t
3411 i82543_mii_recvbits(struct wm_softc *sc)
3412 {
3413 uint32_t v, i, data = 0;
3414
3415 v = CSR_READ(sc, WMREG_CTRL);
3416 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3417 v |= CTRL_SWDPIO(3);
3418
3419 CSR_WRITE(sc, WMREG_CTRL, v);
3420 delay(10);
3421 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3422 delay(10);
3423 CSR_WRITE(sc, WMREG_CTRL, v);
3424 delay(10);
3425
3426 for (i = 0; i < 16; i++) {
3427 data <<= 1;
3428 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3429 delay(10);
3430 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
3431 data |= 1;
3432 CSR_WRITE(sc, WMREG_CTRL, v);
3433 delay(10);
3434 }
3435
3436 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3437 delay(10);
3438 CSR_WRITE(sc, WMREG_CTRL, v);
3439 delay(10);
3440
3441 return (data);
3442 }
3443
3444 #undef MDI_IO
3445 #undef MDI_DIR
3446 #undef MDI_CLK
3447
3448 /*
3449 * wm_gmii_i82543_readreg: [mii interface function]
3450 *
3451 * Read a PHY register on the GMII (i82543 version).
3452 */
3453 static int
3454 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
3455 {
3456 struct wm_softc *sc = (void *) self;
3457 int rv;
3458
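	/*
	 * Bit-bang a clause 22 MII management frame through the
	 * software-definable pins: a 32-bit preamble of ones, then
	 * the start, opcode, PHY address and register fields, then
	 * a turnaround after which 16 data bits are clocked back
	 * from the PHY.
	 */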
3459 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3460 i82543_mii_sendbits(sc, reg | (phy << 5) |
3461 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
3462 rv = i82543_mii_recvbits(sc) & 0xffff;
3463
3464 DPRINTF(WM_DEBUG_GMII,
3465 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
3466 sc->sc_dev.dv_xname, phy, reg, rv));
3467
3468 return (rv);
3469 }
3470
3471 /*
3472 * wm_gmii_i82543_writereg: [mii interface function]
3473 *
3474 * Write a PHY register on the GMII (i82543 version).
3475 */
3476 static void
3477 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
3478 {
3479 struct wm_softc *sc = (void *) self;
3480
3481 i82543_mii_sendbits(sc, 0xffffffffU, 32);
3482 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
3483 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
3484 (MII_COMMAND_START << 30), 32);
3485 }
3486
3487 /*
3488 * wm_gmii_i82544_readreg: [mii interface function]
3489 *
3490 * Read a PHY register on the GMII.
3491 */
3492 static int
3493 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
3494 {
3495 struct wm_softc *sc = (void *) self;
3496 uint32_t mdic = 0;
3497 int i, rv;
3498
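	/*
	 * The i82544 and later have an MDI control register; kick
	 * off the read and poll for MDIC_READY rather than
	 * bit-banging the management interface.
	 */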
3499 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
3500 MDIC_REGADD(reg));
3501
3502 for (i = 0; i < 100; i++) {
3503 mdic = CSR_READ(sc, WMREG_MDIC);
3504 if (mdic & MDIC_READY)
3505 break;
3506 delay(10);
3507 }
3508
3509 if ((mdic & MDIC_READY) == 0) {
3510 printf("%s: MDIC read timed out: phy %d reg %d\n",
3511 sc->sc_dev.dv_xname, phy, reg);
3512 rv = 0;
3513 } else if (mdic & MDIC_E) {
3514 #if 0 /* This is normal if no PHY is present. */
3515 printf("%s: MDIC read error: phy %d reg %d\n",
3516 sc->sc_dev.dv_xname, phy, reg);
3517 #endif
3518 rv = 0;
3519 } else {
3520 rv = MDIC_DATA(mdic);
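		/* An all-ones value usually means no PHY responded. */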
3521 if (rv == 0xffff)
3522 rv = 0;
3523 }
3524
3525 return (rv);
3526 }
3527
3528 /*
3529 * wm_gmii_i82544_writereg: [mii interface function]
3530 *
3531 * Write a PHY register on the GMII.
3532 */
3533 static void
3534 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3535 {
3536 struct wm_softc *sc = (void *) self;
3537 uint32_t mdic = 0;
3538 int i;
3539
3540 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3541 MDIC_REGADD(reg) | MDIC_DATA(val));
3542
3543 for (i = 0; i < 100; i++) {
3544 mdic = CSR_READ(sc, WMREG_MDIC);
3545 if (mdic & MDIC_READY)
3546 break;
3547 delay(10);
3548 }
3549
3550 if ((mdic & MDIC_READY) == 0)
3551 printf("%s: MDIC write timed out: phy %d reg %d\n",
3552 sc->sc_dev.dv_xname, phy, reg);
3553 else if (mdic & MDIC_E)
3554 printf("%s: MDIC write error: phy %d reg %d\n",
3555 sc->sc_dev.dv_xname, phy, reg);
3556 }
3557
3558 /*
3559 * wm_gmii_statchg: [mii interface function]
3560 *
3561 * Callback from MII layer when media changes.
3562 */
3563 static void
3564 wm_gmii_statchg(struct device *self)
3565 {
3566 struct wm_softc *sc = (void *) self;
3567 struct mii_data *mii = &sc->sc_mii;
3568
3569 sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
3570 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3571 sc->sc_fcrtl &= ~FCRTL_XONE;
3572
3573 /*
3574 * Get flow control negotiation result.
3575 */
3576 if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
3577 (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
3578 sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
3579 mii->mii_media_active &= ~IFM_ETH_FMASK;
3580 }
3581
3582 if (sc->sc_flowflags & IFM_FLOW) {
3583 if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
3584 sc->sc_ctrl |= CTRL_TFCE;
3585 sc->sc_fcrtl |= FCRTL_XONE;
3586 }
3587 if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
3588 sc->sc_ctrl |= CTRL_RFCE;
3589 }
3590
3591 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3592 DPRINTF(WM_DEBUG_LINK,
3593 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3594 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3595 } else {
3596 DPRINTF(WM_DEBUG_LINK,
3597 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3598 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3599 }
3600
3601 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3602 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3603 CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
3604 : WMREG_FCRTL, sc->sc_fcrtl);
3605 }
3606