/*	$NetBSD: if_wm.c,v 1.52 2003/10/21 04:35:01 thorpej Exp $	*/
2
3 /*
4 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Fix hw VLAN assist.
44 */
45
46 #include <sys/cdefs.h>
47 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.52 2003/10/21 04:35:01 thorpej Exp $");
48
49 #include "bpfilter.h"
50 #include "rnd.h"
51
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/callout.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/kernel.h>
58 #include <sys/socket.h>
59 #include <sys/ioctl.h>
60 #include <sys/errno.h>
61 #include <sys/device.h>
62 #include <sys/queue.h>
63
64 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
65
66 #if NRND > 0
67 #include <sys/rnd.h>
68 #endif
69
70 #include <net/if.h>
71 #include <net/if_dl.h>
72 #include <net/if_media.h>
73 #include <net/if_ether.h>
74
75 #if NBPFILTER > 0
76 #include <net/bpf.h>
77 #endif
78
79 #include <netinet/in.h> /* XXX for struct ip */
80 #include <netinet/in_systm.h> /* XXX for struct ip */
81 #include <netinet/ip.h> /* XXX for struct ip */
82 #include <netinet/tcp.h> /* XXX for struct tcphdr */
83
84 #include <machine/bus.h>
85 #include <machine/intr.h>
86 #include <machine/endian.h>
87
88 #include <dev/mii/mii.h>
89 #include <dev/mii/miivar.h>
90 #include <dev/mii/mii_bitbang.h>
91
92 #include <dev/pci/pcireg.h>
93 #include <dev/pci/pcivar.h>
94 #include <dev/pci/pcidevs.h>
95
96 #include <dev/pci/if_wmreg.h>
97
#ifdef WM_DEBUG
/* Debug categories; OR the relevant bits into wm_debug to enable. */
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/*
 * DPRINTF(category, (fmt, args...)): print only when the category bit
 * is set in wm_debug.  Note the second argument must be parenthesized
 * printf arguments.
 */
#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
109
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth */
#define	WM_TXQUEUELEN		64	/* Tx jobs managed at once; power of 2 */
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256	/* hw ring size; power of 2 */
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
126
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256	/* hw ring size; power of 2 */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
137
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

/* Byte offset of member/descriptor "x" within the control-data clump. */
#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
158
/*
 * Software state for transmit jobs.  Records which hardware descriptors
 * a queued packet occupies.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
169
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
179
/*
 * Chip variants handled by this driver.
 *
 * NOTE: the ordering of this enum is significant -- the driver compares
 * chip types with relational operators (e.g. "sc_type < WM_T_82543"),
 * so new entries must keep chip-generation order.
 */
typedef enum {
	WM_T_unknown		= 0,
	WM_T_82542_2_0,			/* i82542 2.0 (really old) */
	WM_T_82542_2_1,			/* i82542 2.1+ (old) */
	WM_T_82543,			/* i82543 */
	WM_T_82544,			/* i82544 */
	WM_T_82540,			/* i82540 */
	WM_T_82545,			/* i82545 */
	WM_T_82545_3,			/* i82545 3.0+ */
	WM_T_82546,			/* i82546 */
	WM_T_82546_3,			/* i82546 3.0+ */
	WM_T_82541,			/* i82541 */
	WM_T_82541_2,			/* i82541 2.0+ */
	WM_T_82547,			/* i82547 */
	WM_T_82547_2,			/* i82547 2.0+ */
} wm_chip_type;
196
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	wm_chip_type sc_type;		/* chip type */
	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */

	void *sc_ih;			/* interrupt cookie */

	int sc_ee_addrbits;		/* EEPROM address bits */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/* Rx payload-alignment offset applied in WM_INIT_RXDESC */
	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	/*
	 * NOTE(review): sc_rxdiscard is not referenced in this part of the
	 * file; presumably non-zero while the remainder of an over-long Rx
	 * packet is being discarded -- confirm against wm_rxintr().
	 */
	int sc_rxdiscard;
	int sc_rxlen;			/* bytes accumulated in sc_rxhead
					   chain; reset by WM_RXCHAIN_RESET */
	struct mbuf *sc_rxhead;		/* first mbuf of Rx chain in progress */
	struct mbuf *sc_rxtail;		/* last mbuf of that chain */
	struct mbuf **sc_rxtailp;	/* points at m_next slot to append to */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
303
/*
 * WM_RXCHAIN_RESET: make the in-progress Rx mbuf chain empty again
 * (head pointer, tail pointer-pointer, accumulated length).
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * WM_RXCHAIN_LINK: append mbuf "m" to the in-progress Rx chain.
 * NOTE: "m" is evaluated twice; don't pass an expression with
 * side effects.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */
#define	WM_F_BUS64		0x10	/* bus is 64-bit */
#define	WM_F_PCIX		0x20	/* bus is PCI-X */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

/* Device register read/write through the memory-mapped CSR window. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

/* Bus (DMA) address of Tx/Rx descriptor "x" in the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
336
/*
 * WM_CDTXSYNC: bus_dmamap_sync() "n" Tx descriptors starting at index
 * "x", handling wrap-around of the ring with (at most) two sync calls.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* WM_CDRXSYNC: bus_dmamap_sync() the single Rx descriptor at index "x". */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
363
/*
 * WM_INIT_RXDESC: (re)initialize Rx descriptor "x" to point at its
 * mbuf's buffer, sync the descriptor, and advance the hardware Rx
 * tail pointer (RDT) past it.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
399
/* ifnet entry points. */
static void	wm_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ioctl(struct ifnet *, u_long, caddr_t);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);

static void	wm_shutdown(void *);

/* Chip reset and buffer housekeeping. */
static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);

/* Interrupt service. */
static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

/* TBI (1000BASE-X) media handling. */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

/* GMII (1000BASE-T) media handling. */
static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(struct device *, int, int);
static void	wm_gmii_i82543_writereg(struct device *, int, int, int);

static int	wm_gmii_i82544_readreg(struct device *, int, int);
static void	wm_gmii_i82544_writereg(struct device *, int, int, int);

static void	wm_gmii_statchg(struct device *);

static void	wm_gmii_mediainit(struct wm_softc *);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

/* Autoconfiguration glue. */
static int	wm_match(struct device *, struct cfdata *, void *);
static void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);
447
/*
 * Devices supported by this driver.  The list is terminated by an
 * entry with a NULL wmp_name; wm_lookup() scans it linearly.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01	/* fiber (TBI) part */
#define	WMP_F_1000T		0x02	/* copper (GMII) part */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};
532
#ifdef WM_EVENT_COUNTERS
/*
 * Names for the per-segment-count Tx event counters.  The table must
 * have exactly WM_NTXSEGS entries; the #error below enforces that it
 * gets updated along with WM_NTXSEGS.
 */
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
556
557 static const struct wm_product *
558 wm_lookup(const struct pci_attach_args *pa)
559 {
560 const struct wm_product *wmp;
561
562 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
563 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
564 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
565 return (wmp);
566 }
567 return (NULL);
568 }
569
/*
 * wm_match:
 *
 *	Autoconfiguration match routine; succeeds (returns 1) exactly
 *	when the PCI device appears in our product table.
 */
static int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	/* Relational result is already the required 1/0. */
	return (wm_lookup(pa) != NULL);
}
580
581 static void
582 wm_attach(struct device *parent, struct device *self, void *aux)
583 {
584 struct wm_softc *sc = (void *) self;
585 struct pci_attach_args *pa = aux;
586 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
587 pci_chipset_tag_t pc = pa->pa_pc;
588 pci_intr_handle_t ih;
589 const char *intrstr = NULL;
590 const char *eetype;
591 bus_space_tag_t memt;
592 bus_space_handle_t memh;
593 bus_dma_segment_t seg;
594 int memh_valid;
595 int i, rseg, error;
596 const struct wm_product *wmp;
597 uint8_t enaddr[ETHER_ADDR_LEN];
598 uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
599 pcireg_t preg, memtype;
600 uint32_t reg;
601 int pmreg;
602
603 callout_init(&sc->sc_tick_ch);
604
605 wmp = wm_lookup(pa);
606 if (wmp == NULL) {
607 printf("\n");
608 panic("wm_attach: impossible");
609 }
610
611 sc->sc_dmat = pa->pa_dmat;
612
613 preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
614 aprint_naive(": Ethernet controller\n");
615 aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);
616
617 sc->sc_type = wmp->wmp_type;
618 if (sc->sc_type < WM_T_82543) {
619 if (preg < 2) {
620 aprint_error("%s: i82542 must be at least rev. 2\n",
621 sc->sc_dev.dv_xname);
622 return;
623 }
624 if (preg < 3)
625 sc->sc_type = WM_T_82542_2_0;
626 }
627
628 /*
629 * Map the device.
630 */
631 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
632 switch (memtype) {
633 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
634 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
635 memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
636 memtype, 0, &memt, &memh, NULL, NULL) == 0);
637 break;
638 default:
639 memh_valid = 0;
640 }
641
642 if (memh_valid) {
643 sc->sc_st = memt;
644 sc->sc_sh = memh;
645 } else {
646 aprint_error("%s: unable to map device registers\n",
647 sc->sc_dev.dv_xname);
648 return;
649 }
650
651 /* Enable bus mastering. Disable MWI on the i82542 2.0. */
652 preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
653 preg |= PCI_COMMAND_MASTER_ENABLE;
654 if (sc->sc_type < WM_T_82542_2_1)
655 preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
656 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
657
658 /* Get it out of power save mode, if needed. */
659 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
660 preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
661 PCI_PMCSR_STATE_MASK;
662 if (preg == PCI_PMCSR_STATE_D3) {
663 /*
664 * The card has lost all configuration data in
665 * this state, so punt.
666 */
667 aprint_error("%s: unable to wake from power state D3\n",
668 sc->sc_dev.dv_xname);
669 return;
670 }
671 if (preg != PCI_PMCSR_STATE_D0) {
672 aprint_normal("%s: waking up from power state D%d\n",
673 sc->sc_dev.dv_xname, preg);
674 pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
675 PCI_PMCSR_STATE_D0);
676 }
677 }
678
679 /*
680 * Map and establish our interrupt.
681 */
682 if (pci_intr_map(pa, &ih)) {
683 aprint_error("%s: unable to map interrupt\n",
684 sc->sc_dev.dv_xname);
685 return;
686 }
687 intrstr = pci_intr_string(pc, ih);
688 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
689 if (sc->sc_ih == NULL) {
690 aprint_error("%s: unable to establish interrupt",
691 sc->sc_dev.dv_xname);
692 if (intrstr != NULL)
693 aprint_normal(" at %s", intrstr);
694 aprint_normal("\n");
695 return;
696 }
697 aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
698
699 /*
700 * Determine a few things about the bus we're connected to.
701 */
702 if (sc->sc_type < WM_T_82543) {
703 /* We don't really know the bus characteristics here. */
704 sc->sc_bus_speed = 33;
705 } else {
706 reg = CSR_READ(sc, WMREG_STATUS);
707 if (reg & STATUS_BUS64)
708 sc->sc_flags |= WM_F_BUS64;
709 if (sc->sc_type >= WM_T_82544 &&
710 (reg & STATUS_PCIX_MODE) != 0)
711 sc->sc_flags |= WM_F_PCIX;
712 /*
713 * The quad port adapter is special; it has a PCIX-PCIX
714 * bridge on the board, and can run the secondary bus at
715 * a higher speed.
716 */
717 if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
718 sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
719 : 66;
720 } else if (sc->sc_flags & WM_F_PCIX) {
721 switch (STATUS_PCIXSPD(reg)) {
722 case STATUS_PCIXSPD_50_66:
723 sc->sc_bus_speed = 66;
724 break;
725 case STATUS_PCIXSPD_66_100:
726 sc->sc_bus_speed = 100;
727 break;
728 case STATUS_PCIXSPD_100_133:
729 sc->sc_bus_speed = 133;
730 break;
731 default:
732 aprint_error(
733 "%s: unknown PCIXSPD %d; assuming 66MHz\n",
734 sc->sc_dev.dv_xname, STATUS_PCIXSPD(reg));
735 sc->sc_bus_speed = 66;
736 }
737 } else
738 sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
739 aprint_verbose("%s: %d-bit %dMHz %s bus\n", sc->sc_dev.dv_xname,
740 (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
741 (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
742 }
743
744 /*
745 * Allocate the control data structures, and create and load the
746 * DMA map for it.
747 */
748 if ((error = bus_dmamem_alloc(sc->sc_dmat,
749 sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
750 0)) != 0) {
751 aprint_error(
752 "%s: unable to allocate control data, error = %d\n",
753 sc->sc_dev.dv_xname, error);
754 goto fail_0;
755 }
756
757 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
758 sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
759 0)) != 0) {
760 aprint_error("%s: unable to map control data, error = %d\n",
761 sc->sc_dev.dv_xname, error);
762 goto fail_1;
763 }
764
765 if ((error = bus_dmamap_create(sc->sc_dmat,
766 sizeof(struct wm_control_data), 1,
767 sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
768 aprint_error("%s: unable to create control data DMA map, "
769 "error = %d\n", sc->sc_dev.dv_xname, error);
770 goto fail_2;
771 }
772
773 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
774 sc->sc_control_data, sizeof(struct wm_control_data), NULL,
775 0)) != 0) {
776 aprint_error(
777 "%s: unable to load control data DMA map, error = %d\n",
778 sc->sc_dev.dv_xname, error);
779 goto fail_3;
780 }
781
782 /*
783 * Create the transmit buffer DMA maps.
784 */
785 for (i = 0; i < WM_TXQUEUELEN; i++) {
786 if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
787 WM_NTXSEGS, MCLBYTES, 0, 0,
788 &sc->sc_txsoft[i].txs_dmamap)) != 0) {
789 aprint_error("%s: unable to create Tx DMA map %d, "
790 "error = %d\n", sc->sc_dev.dv_xname, i, error);
791 goto fail_4;
792 }
793 }
794
795 /*
796 * Create the receive buffer DMA maps.
797 */
798 for (i = 0; i < WM_NRXDESC; i++) {
799 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
800 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
801 aprint_error("%s: unable to create Rx DMA map %d, "
802 "error = %d\n", sc->sc_dev.dv_xname, i, error);
803 goto fail_5;
804 }
805 sc->sc_rxsoft[i].rxs_mbuf = NULL;
806 }
807
808 /*
809 * Reset the chip to a known state.
810 */
811 wm_reset(sc);
812
813 /*
814 * Get some information about the EEPROM.
815 */
816 eetype = "MicroWire";
817 if (sc->sc_type >= WM_T_82540)
818 sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
819 if (sc->sc_type <= WM_T_82544)
820 sc->sc_ee_addrbits = 6;
821 else if (sc->sc_type <= WM_T_82546_3) {
822 reg = CSR_READ(sc, WMREG_EECD);
823 if (reg & EECD_EE_SIZE)
824 sc->sc_ee_addrbits = 8;
825 else
826 sc->sc_ee_addrbits = 6;
827 }
828 aprint_verbose("%s: %u word (%d address bits) %s EEPROM\n",
829 sc->sc_dev.dv_xname, 1U << sc->sc_ee_addrbits,
830 sc->sc_ee_addrbits, eetype);
831
832 /*
833 * Read the Ethernet address from the EEPROM.
834 */
835 if (wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
836 sizeof(myea) / sizeof(myea[0]), myea)) {
837 aprint_error("%s: unable to read Ethernet address\n",
838 sc->sc_dev.dv_xname);
839 return;
840 }
841 enaddr[0] = myea[0] & 0xff;
842 enaddr[1] = myea[0] >> 8;
843 enaddr[2] = myea[1] & 0xff;
844 enaddr[3] = myea[1] >> 8;
845 enaddr[4] = myea[2] & 0xff;
846 enaddr[5] = myea[2] >> 8;
847
848 /*
849 * Toggle the LSB of the MAC address on the second port
850 * of the i82546.
851 */
852 if (sc->sc_type == WM_T_82546) {
853 if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
854 enaddr[5] ^= 1;
855 }
856
857 aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
858 ether_sprintf(enaddr));
859
860 /*
861 * Read the config info from the EEPROM, and set up various
862 * bits in the control registers based on their contents.
863 */
864 if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
865 aprint_error("%s: unable to read CFG1 from EEPROM\n",
866 sc->sc_dev.dv_xname);
867 return;
868 }
869 if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
870 aprint_error("%s: unable to read CFG2 from EEPROM\n",
871 sc->sc_dev.dv_xname);
872 return;
873 }
874 if (sc->sc_type >= WM_T_82544) {
875 if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
876 aprint_error("%s: unable to read SWDPIN from EEPROM\n",
877 sc->sc_dev.dv_xname);
878 return;
879 }
880 }
881
882 if (cfg1 & EEPROM_CFG1_ILOS)
883 sc->sc_ctrl |= CTRL_ILOS;
884 if (sc->sc_type >= WM_T_82544) {
885 sc->sc_ctrl |=
886 ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
887 CTRL_SWDPIO_SHIFT;
888 sc->sc_ctrl |=
889 ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
890 CTRL_SWDPINS_SHIFT;
891 } else {
892 sc->sc_ctrl |=
893 ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
894 CTRL_SWDPIO_SHIFT;
895 }
896
897 #if 0
898 if (sc->sc_type >= WM_T_82544) {
899 if (cfg1 & EEPROM_CFG1_IPS0)
900 sc->sc_ctrl_ext |= CTRL_EXT_IPS;
901 if (cfg1 & EEPROM_CFG1_IPS1)
902 sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
903 sc->sc_ctrl_ext |=
904 ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
905 CTRL_EXT_SWDPIO_SHIFT;
906 sc->sc_ctrl_ext |=
907 ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
908 CTRL_EXT_SWDPINS_SHIFT;
909 } else {
910 sc->sc_ctrl_ext |=
911 ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
912 CTRL_EXT_SWDPIO_SHIFT;
913 }
914 #endif
915
916 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
917 #if 0
918 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
919 #endif
920
921 /*
922 * Set up some register offsets that are different between
923 * the i82542 and the i82543 and later chips.
924 */
925 if (sc->sc_type < WM_T_82543) {
926 sc->sc_rdt_reg = WMREG_OLD_RDT0;
927 sc->sc_tdt_reg = WMREG_OLD_TDT;
928 } else {
929 sc->sc_rdt_reg = WMREG_RDT;
930 sc->sc_tdt_reg = WMREG_TDT;
931 }
932
933 /*
934 * Determine if we should use flow control. We should
935 * always use it, unless we're on a i82542 < 2.1.
936 */
937 if (sc->sc_type >= WM_T_82542_2_1)
938 sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;
939
940 /*
941 * Determine if we're TBI or GMII mode, and initialize the
942 * media structures accordingly.
943 */
944 if (sc->sc_type < WM_T_82543 ||
945 (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
946 if (wmp->wmp_flags & WMP_F_1000T)
947 aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
948 "product!\n", sc->sc_dev.dv_xname);
949 wm_tbi_mediainit(sc);
950 } else {
951 if (wmp->wmp_flags & WMP_F_1000X)
952 aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
953 "product!\n", sc->sc_dev.dv_xname);
954 wm_gmii_mediainit(sc);
955 }
956
957 ifp = &sc->sc_ethercom.ec_if;
958 strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
959 ifp->if_softc = sc;
960 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
961 ifp->if_ioctl = wm_ioctl;
962 ifp->if_start = wm_start;
963 ifp->if_watchdog = wm_watchdog;
964 ifp->if_init = wm_init;
965 ifp->if_stop = wm_stop;
966 IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
967 IFQ_SET_READY(&ifp->if_snd);
968
969 sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
970
971 /*
972 * If we're a i82543 or greater, we can support VLANs.
973 */
974 if (sc->sc_type >= WM_T_82543)
975 sc->sc_ethercom.ec_capabilities |=
976 ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;
977
978 /*
979 * We can perform TCPv4 and UDPv4 checkums in-bound. Only
980 * on i82543 and later.
981 */
982 if (sc->sc_type >= WM_T_82543)
983 ifp->if_capabilities |=
984 IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
985
986 /*
987 * Attach the interface.
988 */
989 if_attach(ifp);
990 ether_ifattach(ifp, enaddr);
991 #if NRND > 0
992 rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
993 RND_TYPE_NET, 0);
994 #endif
995
996 #ifdef WM_EVENT_COUNTERS
997 /* Attach event counters. */
998 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
999 NULL, sc->sc_dev.dv_xname, "txsstall");
1000 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
1001 NULL, sc->sc_dev.dv_xname, "txdstall");
1002 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
1003 NULL, sc->sc_dev.dv_xname, "txforceintr");
1004 evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
1005 NULL, sc->sc_dev.dv_xname, "txdw");
1006 evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
1007 NULL, sc->sc_dev.dv_xname, "txqe");
1008 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
1009 NULL, sc->sc_dev.dv_xname, "rxintr");
1010 evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
1011 NULL, sc->sc_dev.dv_xname, "linkintr");
1012
1013 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
1014 NULL, sc->sc_dev.dv_xname, "rxipsum");
1015 evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
1016 NULL, sc->sc_dev.dv_xname, "rxtusum");
1017 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
1018 NULL, sc->sc_dev.dv_xname, "txipsum");
1019 evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
1020 NULL, sc->sc_dev.dv_xname, "txtusum");
1021
1022 evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
1023 NULL, sc->sc_dev.dv_xname, "txctx init");
1024 evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
1025 NULL, sc->sc_dev.dv_xname, "txctx hit");
1026 evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
1027 NULL, sc->sc_dev.dv_xname, "txctx miss");
1028
1029 for (i = 0; i < WM_NTXSEGS; i++)
1030 evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
1031 NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);
1032
1033 evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
1034 NULL, sc->sc_dev.dv_xname, "txdrop");
1035
1036 evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
1037 NULL, sc->sc_dev.dv_xname, "tu");
1038 #endif /* WM_EVENT_COUNTERS */
1039
1040 /*
1041 * Make sure the interface is shutdown during reboot.
1042 */
1043 sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
1044 if (sc->sc_sdhook == NULL)
1045 aprint_error("%s: WARNING: unable to establish shutdown hook\n",
1046 sc->sc_dev.dv_xname);
1047 return;
1048
1049 /*
1050 * Free any resources we've allocated during the failed attach
1051 * attempt. Do this in reverse order and fall through.
1052 */
1053 fail_5:
1054 for (i = 0; i < WM_NRXDESC; i++) {
1055 if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
1056 bus_dmamap_destroy(sc->sc_dmat,
1057 sc->sc_rxsoft[i].rxs_dmamap);
1058 }
1059 fail_4:
1060 for (i = 0; i < WM_TXQUEUELEN; i++) {
1061 if (sc->sc_txsoft[i].txs_dmamap != NULL)
1062 bus_dmamap_destroy(sc->sc_dmat,
1063 sc->sc_txsoft[i].txs_dmamap);
1064 }
1065 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
1066 fail_3:
1067 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
1068 fail_2:
1069 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
1070 sizeof(struct wm_control_data));
1071 fail_1:
1072 bus_dmamem_free(sc->sc_dmat, &seg, rseg);
1073 fail_0:
1074 return;
1075 }
1076
/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 *
 *	Callback registered via shutdownhook_establish() in wm_attach();
 *	arg is the softc.  The "1" asks wm_stop() to disable the chip,
 *	not merely quiesce the driver state.
 */
static void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}
1089
1090 /*
1091 * wm_tx_cksum:
1092 *
1093 * Set up TCP/IP checksumming parameters for the
1094 * specified packet.
1095 */
1096 static int
1097 wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
1098 uint32_t *fieldsp)
1099 {
1100 struct mbuf *m0 = txs->txs_mbuf;
1101 struct livengood_tcpip_ctxdesc *t;
1102 uint32_t fields = 0, ipcs, tucs;
1103 struct ip *ip;
1104 struct ether_header *eh;
1105 int offset, iphl;
1106
1107 /*
1108 * XXX It would be nice if the mbuf pkthdr had offset
1109 * fields for the protocol headers.
1110 */
1111
1112 eh = mtod(m0, struct ether_header *);
1113 switch (htons(eh->ether_type)) {
1114 case ETHERTYPE_IP:
1115 iphl = sizeof(struct ip);
1116 offset = ETHER_HDR_LEN;
1117 break;
1118
1119 case ETHERTYPE_VLAN:
1120 iphl = sizeof(struct ip);
1121 offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
1122 break;
1123
1124 default:
1125 /*
1126 * Don't support this protocol or encapsulation.
1127 */
1128 *fieldsp = 0;
1129 *cmdp = 0;
1130 return (0);
1131 }
1132
1133 if (m0->m_len < (offset + iphl)) {
1134 if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
1135 printf("%s: wm_tx_cksum: mbuf allocation failed, "
1136 "packet dropped\n", sc->sc_dev.dv_xname);
1137 return (ENOMEM);
1138 }
1139 m0 = txs->txs_mbuf;
1140 }
1141
1142 ip = (struct ip *) (mtod(m0, caddr_t) + offset);
1143 iphl = ip->ip_hl << 2;
1144
1145 /*
1146 * NOTE: Even if we're not using the IP or TCP/UDP checksum
1147 * offload feature, if we load the context descriptor, we
1148 * MUST provide valid values for IPCSS and TUCSS fields.
1149 */
1150
1151 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
1152 WM_EVCNT_INCR(&sc->sc_ev_txipsum);
1153 fields |= htole32(WTX_IXSM);
1154 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1155 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1156 WTX_TCPIP_IPCSE(offset + iphl - 1));
1157 } else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
1158 /* Use the cached value. */
1159 ipcs = sc->sc_txctx_ipcs;
1160 } else {
1161 /* Just initialize it to the likely value anyway. */
1162 ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
1163 WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
1164 WTX_TCPIP_IPCSE(offset + iphl - 1));
1165 }
1166
1167 offset += iphl;
1168
1169 if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1170 WM_EVCNT_INCR(&sc->sc_ev_txtusum);
1171 fields |= htole32(WTX_TXSM);
1172 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1173 WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
1174 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1175 } else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
1176 /* Use the cached value. */
1177 tucs = sc->sc_txctx_tucs;
1178 } else {
1179 /* Just initialize it to a valid TCP context. */
1180 tucs = htole32(WTX_TCPIP_TUCSS(offset) |
1181 WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
1182 WTX_TCPIP_TUCSE(0) /* rest of packet */);
1183 }
1184
1185 if (sc->sc_txctx_ipcs == ipcs &&
1186 sc->sc_txctx_tucs == tucs) {
1187 /* Cached context is fine. */
1188 WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
1189 } else {
1190 /* Fill in the context descriptor. */
1191 #ifdef WM_EVENT_COUNTERS
1192 if (sc->sc_txctx_ipcs == 0xffffffff &&
1193 sc->sc_txctx_tucs == 0xffffffff)
1194 WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
1195 else
1196 WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
1197 #endif
1198 t = (struct livengood_tcpip_ctxdesc *)
1199 &sc->sc_txdescs[sc->sc_txnext];
1200 t->tcpip_ipcs = ipcs;
1201 t->tcpip_tucs = tucs;
1202 t->tcpip_cmdlen =
1203 htole32(WTX_CMD_DEXT | WTX_DTYP_C);
1204 t->tcpip_seg = 0;
1205 WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
1206
1207 sc->sc_txctx_ipcs = ipcs;
1208 sc->sc_txctx_tucs = tucs;
1209
1210 sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
1211 txs->txs_ndesc++;
1212 }
1213
1214 *cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
1215 *fieldsp = fields;
1216
1217 return (0);
1218 }
1219
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Pulls packets off ifp->if_snd and maps them onto the Tx
 *	descriptor ring until the queue drains or the ring / job
 *	slots are exhausted.  Packets are only dequeued once we are
 *	certain they can be committed to the hardware.
 */
static void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Nothing to do if the interface is down or output is blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors, so we can
	 * tell at the bottom whether anything was actually queued.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue (peek only; see below). */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry; reap completed jobs if low. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		/* Resources secured; now actually remove it from the queue. */
		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/*
		 * NOTE(review): cksumcmd from wm_tx_cksum() appears to
		 * be in host byte order, yet it is OR'd with htole32()
		 * values here and below; harmless on little-endian,
		 * suspect on big-endian -- verify against if_wmreg.h.
		 */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx; /* dm_nsegs >= 1, so always set */

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip (TDT write starts DMA). */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1472
1473 /*
1474 * wm_watchdog: [ifnet interface function]
1475 *
1476 * Watchdog timer handler.
1477 */
1478 static void
1479 wm_watchdog(struct ifnet *ifp)
1480 {
1481 struct wm_softc *sc = ifp->if_softc;
1482
1483 /*
1484 * Since we're using delayed interrupts, sweep up
1485 * before we report an error.
1486 */
1487 wm_txintr(sc);
1488
1489 if (sc->sc_txfree != WM_NTXDESC) {
1490 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1491 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1492 sc->sc_txnext);
1493 ifp->if_oerrors++;
1494
1495 /* Reset the interface. */
1496 (void) wm_init(ifp);
1497 }
1498
1499 /* Try to get more packets going. */
1500 wm_start(ifp);
1501 }
1502
1503 /*
1504 * wm_ioctl: [ifnet interface function]
1505 *
1506 * Handle control requests from the operator.
1507 */
1508 static int
1509 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1510 {
1511 struct wm_softc *sc = ifp->if_softc;
1512 struct ifreq *ifr = (struct ifreq *) data;
1513 int s, error;
1514
1515 s = splnet();
1516
1517 switch (cmd) {
1518 case SIOCSIFMEDIA:
1519 case SIOCGIFMEDIA:
1520 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1521 break;
1522 default:
1523 error = ether_ioctl(ifp, cmd, data);
1524 if (error == ENETRESET) {
1525 /*
1526 * Multicast list has changed; set the hardware filter
1527 * accordingly.
1528 */
1529 wm_set_filter(sc);
1530 error = 0;
1531 }
1532 break;
1533 }
1534
1535 /* Try to get more packets going. */
1536 wm_start(ifp);
1537
1538 splx(s);
1539 return (error);
1540 }
1541
/*
 * wm_intr:
 *
 *	Interrupt service routine.  Returns non-zero if the interrupt
 *	was ours (shared-interrupt convention).
 */
static int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	/*
	 * Keep servicing until ICR reads back none of the causes we
	 * enabled, or until a condition requiring re-init is seen.
	 */
	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Sweep the Rx ring unconditionally. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* Likewise, reap any completed Tx jobs. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Receiver overrun; schedule a full re-init. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
1609
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.  Walks the in-flight Tx
 *	job list, frees mbufs and DMA maps for frames the hardware has
 *	marked done, and updates interface statistics.
 */
static void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	/* We may be able to enqueue packets again after reaping. */
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * NOTE(review): wtxu_bits is a 32-bit union member; the
		 * assignment of le32toh() into a uint8_t keeps only the
		 * low byte, where the WTX_ST_* bits are presumably
		 * located -- verify against if_wmreg.h.
		 */
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; hand the descriptor back and stop. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				/* Excessive collisions: 16 attempts failed. */
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Return this job's descriptors and release its mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}
1696
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the Rx descriptor
 *	ring from sc_rxptr, reassembling multi-descriptor frames via
 *	the sc_rxhead/sc_rxtailp chain, and passes completed packets
 *	up the stack.
 */
static void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Snapshot the descriptor fields before any reuse. */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/*
		 * A previous buffer-allocation failure left us mid-frame;
		 * keep discarding descriptors until EOP clears the state.
		 */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the frame under assembly. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;	/* total length over all buffers */

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
1901
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 *
 *	For MII (copper) interfaces this simply kicks the MII state
 *	machine; for TBI (fiber) interfaces it tracks link state
 *	directly from the STATUS register and updates the collision
 *	distance and link LED accordingly.
 */
static void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			/* Reprogram collision distance for the new duplex. */
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1968
1969 /*
1970 * wm_tick:
1971 *
1972 * One second timer, used to check link status, sweep up
1973 * completed transmit jobs, etc.
1974 */
1975 static void
1976 wm_tick(void *arg)
1977 {
1978 struct wm_softc *sc = arg;
1979 int s;
1980
1981 s = splnet();
1982
1983 if (sc->sc_flags & WM_F_HAS_MII)
1984 mii_tick(&sc->sc_mii);
1985 else
1986 wm_tbi_check_link(sc);
1987
1988 splx(s);
1989
1990 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1991 }
1992
1993 /*
1994 * wm_reset:
1995 *
1996 * Reset the i82542 chip.
1997 */
1998 static void
1999 wm_reset(struct wm_softc *sc)
2000 {
2001 int i;
2002
2003 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
2004 delay(10000);
2005
2006 for (i = 0; i < 1000; i++) {
2007 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
2008 return;
2009 delay(20);
2010 }
2011
2012 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
2013 printf("%s: WARNING: reset failed to complete\n",
2014 sc->sc_dev.dv_xname);
2015 }
2016
2017 /*
2018 * wm_init: [ifnet interface function]
2019 *
2020 * Initialize the interface. Must be called at splnet().
2021 */
2022 static int
2023 wm_init(struct ifnet *ifp)
2024 {
2025 struct wm_softc *sc = ifp->if_softc;
2026 struct wm_rxsoft *rxs;
2027 int i, error = 0;
2028 uint32_t reg;
2029
2030 /*
2031 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
2032 * There is a small but measurable benefit to avoiding the adjusment
2033 * of the descriptor so that the headers are aligned, for normal mtu,
2034 * on such platforms. One possibility is that the DMA itself is
2035 * slightly more efficient if the front of the entire packet (instead
2036 * of the front of the headers) is aligned.
2037 *
2038 * Note we must always set align_tweak to 0 if we are using
2039 * jumbo frames.
2040 */
2041 #ifdef __NO_STRICT_ALIGNMENT
2042 sc->sc_align_tweak = 0;
2043 #else
2044 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
2045 sc->sc_align_tweak = 0;
2046 else
2047 sc->sc_align_tweak = 2;
2048 #endif /* __NO_STRICT_ALIGNMENT */
2049
2050 /* Cancel any pending I/O. */
2051 wm_stop(ifp, 0);
2052
2053 /* Reset the chip to a known state. */
2054 wm_reset(sc);
2055
2056 /* Initialize the transmit descriptor ring. */
2057 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
2058 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
2059 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2060 sc->sc_txfree = WM_NTXDESC;
2061 sc->sc_txnext = 0;
2062
2063 sc->sc_txctx_ipcs = 0xffffffff;
2064 sc->sc_txctx_tucs = 0xffffffff;
2065
2066 if (sc->sc_type < WM_T_82543) {
2067 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
2068 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
2069 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
2070 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
2071 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
2072 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
2073 } else {
2074 CSR_WRITE(sc, WMREG_TBDAH, 0);
2075 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
2076 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
2077 CSR_WRITE(sc, WMREG_TDH, 0);
2078 CSR_WRITE(sc, WMREG_TDT, 0);
2079 CSR_WRITE(sc, WMREG_TIDV, 128);
2080
2081 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
2082 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
2083 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
2084 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
2085 }
2086 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
2087 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
2088
2089 /* Initialize the transmit job descriptors. */
2090 for (i = 0; i < WM_TXQUEUELEN; i++)
2091 sc->sc_txsoft[i].txs_mbuf = NULL;
2092 sc->sc_txsfree = WM_TXQUEUELEN;
2093 sc->sc_txsnext = 0;
2094 sc->sc_txsdirty = 0;
2095
2096 /*
2097 * Initialize the receive descriptor and receive job
2098 * descriptor rings.
2099 */
2100 if (sc->sc_type < WM_T_82543) {
2101 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2102 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2103 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2104 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2105 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2106 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2107
2108 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2109 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2110 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2111 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2112 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2113 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2114 } else {
2115 CSR_WRITE(sc, WMREG_RDBAH, 0);
2116 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2117 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2118 CSR_WRITE(sc, WMREG_RDH, 0);
2119 CSR_WRITE(sc, WMREG_RDT, 0);
2120 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2121 }
2122 for (i = 0; i < WM_NRXDESC; i++) {
2123 rxs = &sc->sc_rxsoft[i];
2124 if (rxs->rxs_mbuf == NULL) {
2125 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2126 printf("%s: unable to allocate or map rx "
2127 "buffer %d, error = %d\n",
2128 sc->sc_dev.dv_xname, i, error);
2129 /*
2130 * XXX Should attempt to run with fewer receive
2131 * XXX buffers instead of just failing.
2132 */
2133 wm_rxdrain(sc);
2134 goto out;
2135 }
2136 } else
2137 WM_INIT_RXDESC(sc, i);
2138 }
2139 sc->sc_rxptr = 0;
2140 sc->sc_rxdiscard = 0;
2141 WM_RXCHAIN_RESET(sc);
2142
2143 /*
2144 * Clear out the VLAN table -- we don't use it (yet).
2145 */
2146 CSR_WRITE(sc, WMREG_VET, 0);
2147 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2148 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2149
2150 /*
2151 * Set up flow-control parameters.
2152 *
2153 * XXX Values could probably stand some tuning.
2154 */
2155 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2156 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2157 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2158 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2159
2160 if (sc->sc_type < WM_T_82543) {
2161 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2162 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2163 } else {
2164 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2165 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2166 }
2167 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2168 }
2169
2170 #if 0 /* XXXJRT */
2171 /* Deal with VLAN enables. */
2172 if (sc->sc_ethercom.ec_nvlans != 0)
2173 sc->sc_ctrl |= CTRL_VME;
2174 else
2175 #endif /* XXXJRT */
2176 sc->sc_ctrl &= ~CTRL_VME;
2177
2178 /* Write the control registers. */
2179 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2180 #if 0
2181 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2182 #endif
2183
2184 /*
2185 * Set up checksum offload parameters.
2186 */
2187 reg = CSR_READ(sc, WMREG_RXCSUM);
2188 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2189 reg |= RXCSUM_IPOFL;
2190 else
2191 reg &= ~RXCSUM_IPOFL;
2192 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2193 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2194 else {
2195 reg &= ~RXCSUM_TUOFL;
2196 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2197 reg &= ~RXCSUM_IPOFL;
2198 }
2199 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2200
2201 /*
2202 * Set up the interrupt registers.
2203 */
2204 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2205 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2206 ICR_RXO | ICR_RXT0;
2207 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2208 sc->sc_icr |= ICR_RXCFG;
2209 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2210
2211 /* Set up the inter-packet gap. */
2212 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2213
2214 #if 0 /* XXXJRT */
2215 /* Set the VLAN ethernetype. */
2216 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2217 #endif
2218
2219 /*
2220 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
2222 * we resolve the media type.
2223 */
2224 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2225 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2226 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2227
2228 /* Set the media. */
2229 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2230
2231 /*
2232 * Set up the receive control register; we actually program
2233 * the register when we set the receive filter. Use multicast
2234 * address offset type 0.
2235 *
2236 * Only the i82544 has the ability to strip the incoming
2237 * CRC, so we don't enable that feature.
2238 */
2239 sc->sc_mchash_type = 0;
2240 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2241 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2242
2243 if(MCLBYTES == 2048) {
2244 sc->sc_rctl |= RCTL_2k;
2245 } else {
2246 /*
2247 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2248 * XXX segments, dropping" -- why?
2249 */
2250 #if 0
2251 if(sc->sc_type >= WM_T_82543) {
2252 switch(MCLBYTES) {
2253 case 4096:
2254 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2255 break;
2256 case 8192:
2257 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2258 break;
2259 case 16384:
2260 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2261 break;
2262 default:
2263 panic("wm_init: MCLBYTES %d unsupported",
2264 MCLBYTES);
2265 break;
2266 }
2267 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2268 #else
2269 panic("wm_init: MCLBYTES > 2048 not supported.");
2270 #endif
2271 }
2272
2273 /* Set the receive filter. */
2274 wm_set_filter(sc);
2275
2276 /* Start the one second link check clock. */
2277 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2278
2279 /* ...all done! */
2280 ifp->if_flags |= IFF_RUNNING;
2281 ifp->if_flags &= ~IFF_OACTIVE;
2282
2283 out:
2284 if (error)
2285 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2286 return (error);
2287 }
2288
2289 /*
2290 * wm_rxdrain:
2291 *
2292 * Drain the receive queue.
2293 */
2294 static void
2295 wm_rxdrain(struct wm_softc *sc)
2296 {
2297 struct wm_rxsoft *rxs;
2298 int i;
2299
2300 for (i = 0; i < WM_NRXDESC; i++) {
2301 rxs = &sc->sc_rxsoft[i];
2302 if (rxs->rxs_mbuf != NULL) {
2303 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2304 m_freem(rxs->rxs_mbuf);
2305 rxs->rxs_mbuf = NULL;
2306 }
2307 }
2308 }
2309
/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.  If "disable" is non-zero,
 *	the receive buffers are drained and freed as well.
 */
static void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	/* On a full stop, also free the receive buffers. */
	if (disable)
		wm_rxdrain(sc);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}
2351
/*
 * wm_acquire_eeprom:
 *
 *	Perform the EEPROM handshake required on some chips
 *	(those with WM_F_EEPROM_HANDSHAKE set).
 *
 *	Returns 0 on success, 1 if the grant was never asserted.
 */
static int
wm_acquire_eeprom(struct wm_softc *sc)
{
	uint32_t reg;
	int x;

	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
		reg = CSR_READ(sc, WMREG_EECD);

		/* Request EEPROM access. */
		reg |= EECD_EE_REQ;
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* ..and wait for it to be granted. */
		for (x = 0; x < 100; x++) {
			reg = CSR_READ(sc, WMREG_EECD);
			if (reg & EECD_EE_GNT)
				break;
			delay(5);
		}
		if ((reg & EECD_EE_GNT) == 0) {
			/* Timed out (~500us); withdraw our request bit. */
			aprint_error("%s: could not acquire EEPROM GNT\n",
			    sc->sc_dev.dv_xname);
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			return (1);
		}
	}

	return (0);
}
2388
2389 /*
2390 * wm_release_eeprom:
2391 *
2392 * Release the EEPROM mutex.
2393 */
2394 static void
2395 wm_release_eeprom(struct wm_softc *sc)
2396 {
2397 uint32_t reg;
2398
2399 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2400 reg = CSR_READ(sc, WMREG_EECD);
2401 reg &= ~EECD_EE_REQ;
2402 CSR_WRITE(sc, WMREG_EECD, reg);
2403 }
2404 }
2405
/*
 * wm_eeprom_sendbits:
 *
 *	Send a series of bits to the EEPROM, MSB first, by bit-banging
 *	the DI line and pulsing SK for each bit.
 */
static void
wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
{
	uint32_t reg;
	int x;

	reg = CSR_READ(sc, WMREG_EECD);

	for (x = nbits; x > 0; x--) {
		/* Present the data bit on DI... */
		if (bits & (1U << (x - 1)))
			reg |= EECD_DI;
		else
			reg &= ~EECD_DI;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
		/* ...then clock it in with a SK high/low pulse. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
}
2432
/*
 * wm_eeprom_recvbits:
 *
 *	Receive a series of bits from the EEPROM, MSB first, by pulsing
 *	SK and sampling DO after each rising edge.  The result is stored
 *	in *valp.
 */
static void
wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
{
	uint32_t reg, val;
	int x;

	/* Make sure we are not driving DI while the EEPROM drives DO. */
	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;

	val = 0;
	for (x = nbits; x > 0; x--) {
		/* Clock high, then sample the data-out line. */
		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
		delay(2);
		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
			val |= (1U << (x - 1));
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}
	*valp = val;
}
2457
/*
 * wm_read_eeprom_uwire:
 *
 *	Read "wordcnt" 16-bit words starting at word offset "word" from
 *	the EEPROM using the MicroWire protocol, storing them in "data".
 *
 *	Always returns 0; the caller is expected to have acquired the
 *	EEPROM first (see wm_read_eeprom()).
 */
static int
wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg, val;
	int i;

	for (i = 0; i < wordcnt; i++) {
		/* Clear SK and DI. */
		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);

		/* Shift in address. */
		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);

		/* Shift out the data. */
		wm_eeprom_recvbits(sc, &val, 16);
		data[i] = val & 0xffff;

		/* Clear CHIP SELECT, ending this word's transaction. */
		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);
	}

	return (0);
}
2497
2498 /*
2499 * wm_read_eeprom:
2500 *
2501 * Read data from the serial EEPROM.
2502 */
2503 static int
2504 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2505 {
2506 int rv;
2507
2508 if (wm_acquire_eeprom(sc))
2509 return (1);
2510
2511 rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
2512
2513 wm_release_eeprom(sc);
2514 return (rv);
2515 }
2516
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor: allocate an
 *	mbuf cluster, load it into the slot's DMA map, and initialize
 *	the receive descriptor.
 *
 *	Returns 0 on success or ENOBUFS if no mbuf/cluster is available.
 */
static int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Drop any mapping left over from a previous buffer. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Make the entire cluster available to the hardware. */
	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/* A single-cluster load is not expected to fail. */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}
2560
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.  A NULL "enaddr"
 *	clears the slot (the Address Valid bit is left off).
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		/* Pack the 6-byte Ethernet address little-endian. */
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	/* The RAL lives at a different offset on i82544 and later. */
	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}
2591
2592 /*
2593 * wm_mchash:
2594 *
2595 * Compute the hash of the multicast address for the 4096-bit
2596 * multicast filter.
2597 */
2598 static uint32_t
2599 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2600 {
2601 static const int lo_shift[4] = { 4, 3, 2, 0 };
2602 static const int hi_shift[4] = { 4, 5, 6, 8 };
2603 uint32_t hash;
2604
2605 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2606 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2607
2608 return (hash & 0xfff);
2609 }
2610
2611 /*
2612 * wm_set_filter:
2613 *
2614 * Set up the receive filter.
2615 */
2616 static void
2617 wm_set_filter(struct wm_softc *sc)
2618 {
2619 struct ethercom *ec = &sc->sc_ethercom;
2620 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2621 struct ether_multi *enm;
2622 struct ether_multistep step;
2623 bus_addr_t mta_reg;
2624 uint32_t hash, reg, bit;
2625 int i;
2626
2627 if (sc->sc_type >= WM_T_82544)
2628 mta_reg = WMREG_CORDOVA_MTA;
2629 else
2630 mta_reg = WMREG_MTA;
2631
2632 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2633
2634 if (ifp->if_flags & IFF_BROADCAST)
2635 sc->sc_rctl |= RCTL_BAM;
2636 if (ifp->if_flags & IFF_PROMISC) {
2637 sc->sc_rctl |= RCTL_UPE;
2638 goto allmulti;
2639 }
2640
2641 /*
2642 * Set the station address in the first RAL slot, and
2643 * clear the remaining slots.
2644 */
2645 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2646 for (i = 1; i < WM_RAL_TABSIZE; i++)
2647 wm_set_ral(sc, NULL, i);
2648
2649 /* Clear out the multicast table. */
2650 for (i = 0; i < WM_MC_TABSIZE; i++)
2651 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2652
2653 ETHER_FIRST_MULTI(step, ec, enm);
2654 while (enm != NULL) {
2655 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2656 /*
2657 * We must listen to a range of multicast addresses.
2658 * For now, just accept all multicasts, rather than
2659 * trying to set only those filter bits needed to match
2660 * the range. (At this time, the only use of address
2661 * ranges is for IP multicast routing, for which the
2662 * range is big enough to require all bits set.)
2663 */
2664 goto allmulti;
2665 }
2666
2667 hash = wm_mchash(sc, enm->enm_addrlo);
2668
2669 reg = (hash >> 5) & 0x7f;
2670 bit = hash & 0x1f;
2671
2672 hash = CSR_READ(sc, mta_reg + (reg << 2));
2673 hash |= 1U << bit;
2674
2675 /* XXX Hardware bug?? */
2676 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2677 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2678 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2679 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2680 } else
2681 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2682
2683 ETHER_NEXT_MULTI(step, enm);
2684 }
2685
2686 ifp->if_flags &= ~IFF_ALLMULTI;
2687 goto setit;
2688
2689 allmulti:
2690 ifp->if_flags |= IFF_ALLMULTI;
2691 sc->sc_rctl |= RCTL_MPE;
2692
2693 setit:
2694 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2695 }
2696
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices: select the
 *	inter-packet gap, program the link-LED/LOS software-definable
 *	pins, and register the supported media types.
 */
static void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Add one media type, printing a comma-separated list as we go. */
#define	ADD(ss, mm, dd)							\
do {									\
	printf("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2743
2744 /*
2745 * wm_tbi_mediastatus: [ifmedia interface function]
2746 *
2747 * Get the current interface media status on a 1000BASE-X device.
2748 */
2749 static void
2750 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2751 {
2752 struct wm_softc *sc = ifp->if_softc;
2753
2754 ifmr->ifm_status = IFM_AVALID;
2755 ifmr->ifm_active = IFM_ETHER;
2756
2757 if (sc->sc_tbi_linkup == 0) {
2758 ifmr->ifm_active |= IFM_NONE;
2759 return;
2760 }
2761
2762 ifmr->ifm_status |= IFM_ACTIVE;
2763 ifmr->ifm_active |= IFM_1000_SX;
2764 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2765 ifmr->ifm_active |= IFM_FDX;
2766 }
2767
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device:
 *	program the transmit configuration word for autonegotiation,
 *	then wait (up to ~500ms) for the link to come up if we have
 *	signal.  Always returns 0.
 */
static int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/* Build the TXCW from the selected media's ability word. */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWDPIN 1 is Loss Of Signal (active high). */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; set the collision distance to match. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
2834
2835 /*
2836 * wm_tbi_set_linkled:
2837 *
2838 * Update the link LED on 1000BASE-X devices.
2839 */
2840 static void
2841 wm_tbi_set_linkled(struct wm_softc *sc)
2842 {
2843
2844 if (sc->sc_tbi_linkup)
2845 sc->sc_ctrl |= CTRL_SWDPIN(0);
2846 else
2847 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2848
2849 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2850 }
2851
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.  Called periodically;
 *	sc_tbi_anstate acts as a countdown so that we skip the check
 *	while autonegotiation is settling.
 */
static void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		/* Still settling; just count down this tick. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		/* Link is up; set the collision distance to match duplex. */
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
2899
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.  On i82544 and later the PHY reset is a bit in
 *	the CTRL register; on older chips the reset pin is wired to
 *	software-definable pin 4 (active-low).
 */
static void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* Assert PHY reset, wait, then deassert and wait again. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Pulse the (active-low) reset: high, low, high. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2936
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices: select the MII
 *	access routines for the chip generation, reset the PHY, and
 *	attach the MII layer.
 */
static void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* i82544 and later have an MDIC register; i82543 bit-bangs. */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* If no PHY was found, offer only a "none" media choice. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2984
2985 /*
2986 * wm_gmii_mediastatus: [ifmedia interface function]
2987 *
2988 * Get the current interface media status on a 1000BASE-T device.
2989 */
2990 static void
2991 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2992 {
2993 struct wm_softc *sc = ifp->if_softc;
2994
2995 mii_pollstat(&sc->sc_mii);
2996 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2997 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2998 }
2999
3000 /*
3001 * wm_gmii_mediachange: [ifmedia interface function]
3002 *
3003 * Set hardware to newly-selected media on a 1000BASE-T device.
3004 */
3005 static int
3006 wm_gmii_mediachange(struct ifnet *ifp)
3007 {
3008 struct wm_softc *sc = ifp->if_softc;
3009
3010 if (ifp->if_flags & IFF_UP)
3011 mii_mediachg(&sc->sc_mii);
3012 return (0);
3013 }
3014
3015 #define MDI_IO CTRL_SWDPIN(2)
3016 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
3017 #define MDI_CLK CTRL_SWDPIN(3)
3018
3019 static void
3020 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
3021 {
3022 uint32_t i, v;
3023
3024 v = CSR_READ(sc, WMREG_CTRL);
3025 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
3026 v |= MDI_DIR | CTRL_SWDPIO(3);
3027
3028 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
3029 if (data & i)
3030 v |= MDI_IO;
3031 else
3032 v &= ~MDI_IO;
3033 CSR_WRITE(sc, WMREG_CTRL, v);
3034 delay(10);
3035 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
3036 delay(10);
3037 CSR_WRITE(sc, WMREG_CTRL, v);
3038 delay(10);
3039 }
3040 }
3041
/*
 * i82543_mii_recvbits:
 *
 *	Bit-bang 16 bits of data in from the MDIO interface (MSB
 *	first), with turnaround clock cycles before and after.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Make MDIO an input (MDI_DIR cleared), keep MDC an output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* Turnaround clocking before the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		/* Clock high, then sample the data line. */
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One more clock cycle to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}
3075
3076 #undef MDI_IO
3077 #undef MDI_DIR
3078 #undef MDI_CLK
3079
/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version, which
 *	bit-bangs the MII management frame).
 */
static int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	/* 32-bit preamble of ones, then start/read/phy/reg (14 bits). */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}
3102
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version, which
 *	bit-bangs the MII management frame).
 */
static void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	/* 32-bit preamble, then start/write/phy/reg/ack/data (32 bits). */
	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}
3118
/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII via the MDIC register
 *	(i82544 and later).  Returns 0 on timeout or MDIC error.
 */
static int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion, up to ~1ms. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads as "no device"; report 0 instead. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
3159
/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII via the MDIC register
 *	(i82544 and later).  Errors are logged but not returned.
 */
static void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion, up to ~1ms. */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
3189
3190 /*
3191 * wm_gmii_statchg: [mii interface function]
3192 *
3193 * Callback from MII layer when media changes.
3194 */
3195 static void
3196 wm_gmii_statchg(struct device *self)
3197 {
3198 struct wm_softc *sc = (void *) self;
3199
3200 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3201
3202 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3203 DPRINTF(WM_DEBUG_LINK,
3204 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3205 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3206 } else {
3207 DPRINTF(WM_DEBUG_LINK,
3208 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3209 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3210 }
3211
3212 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3213 }
3214