/* $NetBSD: if_wm.c,v 1.42 2003/10/17 20:41:21 thorpej Exp $ */

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.42 2003/10/17 20:41:21 thorpej Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
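
/*
 * Both ring sizes are powers of two, so the "next index" macros above
 * wrap with a simple mask instead of a modulo.  A stand-alone sketch
 * (illustrative only; not compiled into the driver):
 */
#if 0
#include <assert.h>

static void
wm_ring_index_example(void)
{

	/* Advancing from the last descriptor wraps back to 0. */
	assert(WM_NEXTTX(WM_NTXDESC - 1) == 0);		/* 255 -> 0 */
	assert(WM_NEXTTX(10) == 11);			/* normal case */

	/* The job-queue index wraps the same way with its own mask. */
	assert(WM_NEXTTXS(WM_TXQUEUELEN - 1) == 0);	/* 63 -> 0 */
}
#endif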

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS];	/* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
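
/*
 * The tail-pointer idiom above appends to the Rx mbuf chain in O(1)
 * without walking the list: sc_rxtailp always points at the pointer
 * cell to fill next.  A minimal sketch of the same technique with a
 * generic list node (illustrative only; not driver code):
 */
#if 0
struct node {
	struct node *n_next;
};

static struct node *head;
static struct node **tailp = &head;	/* points at the cell to fill */

static void
append(struct node *n)
{

	*tailp = n;			/* link the new node in */
	tailp = &n->n_next;		/* advance to its next-cell */
	*tailp = NULL;			/* keep the chain terminated */
}
#endif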

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
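
/*
 * Putting the offset macros together: the bus address programmed into
 * the chip for descriptor N is just the base of the control-data DMA
 * segment plus the descriptor's offset within the clump.  A worked
 * example (the base address 0x1000000 is made up for illustration):
 *
 *	WM_CDTXADDR(sc, 10)
 *	    == sc->sc_cddma + WM_CDTXOFF(10)
 *	    == 0x1000000 + 10 * sizeof(wiseman_txdesc_t)
 */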

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
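
/*
 * Example of the split above: WM_CDTXSYNC(sc, 254, 4, ops) on the
 * 256-entry ring first syncs descriptors 254..255 (the tail of the
 * ring), then wraps and syncs descriptors 0..1 in the second call.
 */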

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
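
/*
 * Why the 2-byte scoot works: an Ethernet header is 14 bytes, so if a
 * buffer starts on a 4-byte boundary the IP header lands only 2-byte
 * aligned.  Starting the packet 2 bytes into the buffer fixes that:
 *
 *	buffer offset:	0   2                 16
 *			[..][Ethernet header][IP header ...]
 *			     (14 bytes)       now 4-byte aligned
 */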

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
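
	/*
	 * Example of the unpacking above: if the EEPROM words are
	 * { 0x2211, 0x4433, 0x6655 }, the low byte of each word comes
	 * first and the resulting station address is 11:22:33:44:55:66.
	 */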

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
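
/*
 * Worked example for the context fields above (a sketch; offsets
 * assume a plain Ethernet + IPv4 packet with no VLAN tag and a
 * 20-byte IP header):
 *
 *	IPCSS = 14			start of the IP header
 *	IPCSO = 14 + 10 = 24		offset of ip_sum in the packet
 *	IPCSE = 14 + 20 - 1 = 33	last byte of the IP header
 *
 *	TUCSS = 34			start of the TCP header
 *	TUCSO = 34 + 16 = 50		offset of th_sum
 *	TUCSE = 0			checksum to the end of the packet
 */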

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal MTU,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
	 */
1952 #ifdef __NO_STRICT_ALIGNMENT
1953 sc->sc_align_tweak = 0;
1954 #else
1955 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
1956 sc->sc_align_tweak = 0;
1957 else
1958 sc->sc_align_tweak = 2;
1959 #endif /* __NO_STRICT_ALIGNMENT */
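
	/*
	 * Worked example of the tweak above: the Ethernet header is
	 * ETHER_HDR_LEN (14) bytes long, so receiving into the cluster
	 * at offset 2 places the IP header at offset 16, a 4-byte
	 * boundary:
	 *
	 *	buffer start = cluster + sc_align_tweak (0 or 2)
	 *	IP header    = buffer start + 14 -> offset 14 or 16
	 */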
1960
1961 /* Cancel any pending I/O. */
1962 wm_stop(ifp, 0);
1963
1964 /* Reset the chip to a known state. */
1965 wm_reset(sc);
1966
1967 /* Initialize the transmit descriptor ring. */
1968 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1969 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1970 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1971 sc->sc_txfree = WM_NTXDESC;
1972 sc->sc_txnext = 0;
1973
1974 sc->sc_txctx_ipcs = 0xffffffff;
1975 sc->sc_txctx_tucs = 0xffffffff;
1976
1977 if (sc->sc_type < WM_T_82543) {
1978 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1979 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1980 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1981 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1982 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1983 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1984 } else {
1985 CSR_WRITE(sc, WMREG_TBDAH, 0);
1986 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1987 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1988 CSR_WRITE(sc, WMREG_TDH, 0);
1989 CSR_WRITE(sc, WMREG_TDT, 0);
1990 CSR_WRITE(sc, WMREG_TIDV, 128);
1991
1992 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1993 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1994 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1995 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1996 }
1997 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1998 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1999
2000 /* Initialize the transmit job descriptors. */
2001 for (i = 0; i < WM_TXQUEUELEN; i++)
2002 sc->sc_txsoft[i].txs_mbuf = NULL;
2003 sc->sc_txsfree = WM_TXQUEUELEN;
2004 sc->sc_txsnext = 0;
2005 sc->sc_txsdirty = 0;
2006
2007 /*
2008 * Initialize the receive descriptor and receive job
2009 * descriptor rings.
2010 */
2011 if (sc->sc_type < WM_T_82543) {
2012 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
2013 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
2014 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
2015 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
2016 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
2017 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
2018
2019 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
2020 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
2021 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
2022 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
2023 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
2024 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
2025 } else {
2026 CSR_WRITE(sc, WMREG_RDBAH, 0);
2027 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
2028 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
2029 CSR_WRITE(sc, WMREG_RDH, 0);
2030 CSR_WRITE(sc, WMREG_RDT, 0);
2031 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
2032 }
2033 for (i = 0; i < WM_NRXDESC; i++) {
2034 rxs = &sc->sc_rxsoft[i];
2035 if (rxs->rxs_mbuf == NULL) {
2036 if ((error = wm_add_rxbuf(sc, i)) != 0) {
2037 printf("%s: unable to allocate or map rx "
2038 "buffer %d, error = %d\n",
2039 sc->sc_dev.dv_xname, i, error);
2040 /*
2041 * XXX Should attempt to run with fewer receive
2042 * XXX buffers instead of just failing.
2043 */
2044 wm_rxdrain(sc);
2045 goto out;
2046 }
2047 } else
2048 WM_INIT_RXDESC(sc, i);
2049 }
2050 sc->sc_rxptr = 0;
2051 sc->sc_rxdiscard = 0;
2052 WM_RXCHAIN_RESET(sc);
2053
2054 /*
2055 * Clear out the VLAN table -- we don't use it (yet).
2056 */
2057 CSR_WRITE(sc, WMREG_VET, 0);
2058 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2059 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2060
2061 /*
2062 * Set up flow-control parameters.
2063 *
2064 * XXX Values could probably stand some tuning.
2065 */
2066 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
2067 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2068 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2069 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2070
2071 if (sc->sc_type < WM_T_82543) {
2072 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2073 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2074 } else {
2075 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2076 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2077 }
2078 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2079 }
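
	/*
	 * For reference: FCAL/FCAH are believed to hold the 802.3x
	 * PAUSE multicast address 01:80:c2:00:00:01 in the same
	 * low-byte-first packing used by the RAL registers, and
	 * ETHERTYPE_FLOWCONTROL is the MAC Control Ethertype 0x8808.
	 */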
2080
2081 #if 0 /* XXXJRT */
2082 /* Deal with VLAN enables. */
2083 if (sc->sc_ethercom.ec_nvlans != 0)
2084 sc->sc_ctrl |= CTRL_VME;
2085 else
2086 #endif /* XXXJRT */
2087 sc->sc_ctrl &= ~CTRL_VME;
2088
2089 /* Write the control registers. */
2090 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2091 #if 0
2092 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2093 #endif
2094
2095 /*
2096 * Set up checksum offload parameters.
2097 */
2098 reg = CSR_READ(sc, WMREG_RXCSUM);
2099 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2100 reg |= RXCSUM_IPOFL;
2101 else
2102 reg &= ~RXCSUM_IPOFL;
2103 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2104 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2105 else {
2106 reg &= ~RXCSUM_TUOFL;
2107 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2108 reg &= ~RXCSUM_IPOFL;
2109 }
2110 CSR_WRITE(sc, WMREG_RXCSUM, reg);
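	/*
	 * Note that enabling TCP/UDP offload forces RXCSUM_IPOFL on
	 * above: on these chips the TCP/UDP receive checksum logic is
	 * understood to depend on the IP checksum logic also being
	 * enabled, so TUOFL is never set without IPOFL.
	 */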
2111
2112 /*
2113 * Set up the interrupt registers.
2114 */
2115 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2116 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2117 ICR_RXO | ICR_RXT0;
2118 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2119 sc->sc_icr |= ICR_RXCFG;
2120 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2121
2122 /* Set up the inter-packet gap. */
2123 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2124
2125 #if 0 /* XXXJRT */
2126 /* Set the VLAN ethernetype. */
2127 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2128 #endif
2129
2130 /*
2131 * Set up the transmit control register; we start out with
2132 * a collision distance suitable for FDX, but update it when
2133 * we resolve the media type.
2134 */
2135 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2136 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2137 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2138
2139 /* Set the media. */
2140 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2141
2142 /*
2143 * Set up the receive control register; we actually program
2144 * the register when we set the receive filter. Use multicast
2145 * address offset type 0.
2146 *
2147 * Only the i82544 has the ability to strip the incoming
2148 * CRC, so we don't enable that feature.
2149 */
2150 sc->sc_mchash_type = 0;
2151 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_LPE |
2152 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2153
2154 if (MCLBYTES == 2048) {
2155 sc->sc_rctl |= RCTL_2k;
2156 } else {
2157 /*
2158 * XXX MCLBYTES > 2048 causes "Tx packet consumes too many DMA"
2159 * XXX segments, dropping" -- why?
2160 */
2161 #if 0
2162 if (sc->sc_type >= WM_T_82543) {
2163 switch (MCLBYTES) {
2164 case 4096:
2165 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
2166 break;
2167 case 8192:
2168 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
2169 break;
2170 case 16384:
2171 sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
2172 break;
2173 default:
2174 panic("wm_init: MCLBYTES %d unsupported",
2175 MCLBYTES);
2176 break;
2177 }
2178 } else panic("wm_init: i82542 requires MCLBYTES = 2048");
2179 #else
2180 panic("wm_init: MCLBYTES > 2048 not supported.");
2181 #endif
2182 }
2183
2184 /* Set the receive filter. */
2185 wm_set_filter(sc);
2186
2187 /* Start the one second link check clock. */
2188 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2189
2190 /* ...all done! */
2191 ifp->if_flags |= IFF_RUNNING;
2192 ifp->if_flags &= ~IFF_OACTIVE;
2193
2194 out:
2195 if (error)
2196 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2197 return (error);
2198 }
2199
2200 /*
2201 * wm_rxdrain:
2202 *
2203 * Drain the receive queue.
2204 */
2205 void
2206 wm_rxdrain(struct wm_softc *sc)
2207 {
2208 struct wm_rxsoft *rxs;
2209 int i;
2210
2211 for (i = 0; i < WM_NRXDESC; i++) {
2212 rxs = &sc->sc_rxsoft[i];
2213 if (rxs->rxs_mbuf != NULL) {
2214 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2215 m_freem(rxs->rxs_mbuf);
2216 rxs->rxs_mbuf = NULL;
2217 }
2218 }
2219 }
2220
2221 /*
2222 * wm_stop: [ifnet interface function]
2223 *
2224 * Stop transmission on the interface.
2225 */
2226 void
2227 wm_stop(struct ifnet *ifp, int disable)
2228 {
2229 struct wm_softc *sc = ifp->if_softc;
2230 struct wm_txsoft *txs;
2231 int i;
2232
2233 /* Stop the one second clock. */
2234 callout_stop(&sc->sc_tick_ch);
2235
2236 if (sc->sc_flags & WM_F_HAS_MII) {
2237 /* Down the MII. */
2238 mii_down(&sc->sc_mii);
2239 }
2240
2241 /* Stop the transmit and receive processes. */
2242 CSR_WRITE(sc, WMREG_TCTL, 0);
2243 CSR_WRITE(sc, WMREG_RCTL, 0);
2244
2245 /* Release any queued transmit buffers. */
2246 for (i = 0; i < WM_TXQUEUELEN; i++) {
2247 txs = &sc->sc_txsoft[i];
2248 if (txs->txs_mbuf != NULL) {
2249 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2250 m_freem(txs->txs_mbuf);
2251 txs->txs_mbuf = NULL;
2252 }
2253 }
2254
2255 if (disable)
2256 wm_rxdrain(sc);
2257
2258 /* Mark the interface as down and cancel the watchdog timer. */
2259 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2260 ifp->if_timer = 0;
2261 }
2262
2263 /*
2264 * wm_read_eeprom:
2265 *
2266 * Read data from the serial EEPROM.
2267 */
2268 void
2269 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2270 {
2271 uint32_t reg;
2272 int i, x, addrbits = 6;
2273
2274 for (i = 0; i < wordcnt; i++) {
2275 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2276 reg = CSR_READ(sc, WMREG_EECD);
2277
2278 /* Get number of address bits. */
2279 if (reg & EECD_EE_SIZE)
2280 addrbits = 8;
2281
2282 /* Request EEPROM access. */
2283 reg |= EECD_EE_REQ;
2284 CSR_WRITE(sc, WMREG_EECD, reg);
2285
2286 /* ...and wait for it to be granted. */
2287 for (x = 0; x < 100; x++) {
2288 reg = CSR_READ(sc, WMREG_EECD);
2289 if (reg & EECD_EE_GNT)
2290 break;
2291 delay(5);
2292 }
2293 if ((reg & EECD_EE_GNT) == 0) {
2294 printf("%s: could not acquire EEPROM GNT\n",
2295 sc->sc_dev.dv_xname);
2296 data[i] = 0xffff;
2297 reg &= ~EECD_EE_REQ;
2298 CSR_WRITE(sc, WMREG_EECD, reg);
2299 continue;
2300 }
2301 } else
2302 reg = 0;
2303
2304 /* Clear SK and DI. */
2305 reg &= ~(EECD_SK | EECD_DI);
2306 CSR_WRITE(sc, WMREG_EECD, reg);
2307
2308 /* Set CHIP SELECT. */
2309 reg |= EECD_CS;
2310 CSR_WRITE(sc, WMREG_EECD, reg);
2311 delay(2);
2312
2313 /* Shift in the READ command. */
2314 for (x = 3; x > 0; x--) {
2315 if (UWIRE_OPC_READ & (1 << (x - 1)))
2316 reg |= EECD_DI;
2317 else
2318 reg &= ~EECD_DI;
2319 CSR_WRITE(sc, WMREG_EECD, reg);
2320 delay(2);
2321 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2322 delay(2);
2323 CSR_WRITE(sc, WMREG_EECD, reg);
2324 delay(2);
2325 }
2326
2327 /* Shift in address. */
2328 for (x = addrbits; x > 0; x--) {
2329 if ((word + i) & (1 << (x - 1)))
2330 reg |= EECD_DI;
2331 else
2332 reg &= ~EECD_DI;
2333 CSR_WRITE(sc, WMREG_EECD, reg);
2334 delay(2);
2335 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2336 delay(2);
2337 CSR_WRITE(sc, WMREG_EECD, reg);
2338 delay(2);
2339 }
2340
2341 /* Shift out the data. */
2342 reg &= ~EECD_DI;
2343 data[i] = 0;
2344 for (x = 16; x > 0; x--) {
2345 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2346 delay(2);
2347 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2348 data[i] |= (1 << (x - 1));
2349 CSR_WRITE(sc, WMREG_EECD, reg);
2350 delay(2);
2351 }
2352
2353 /* Clear CHIP SELECT. */
2354 reg &= ~EECD_CS;
2355 CSR_WRITE(sc, WMREG_EECD, reg);
2356 delay(2);
2357
2358 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2359 /* Release the EEPROM. */
2360 reg &= ~EECD_EE_REQ;
2361 CSR_WRITE(sc, WMREG_EECD, reg);
2362 }
2363 }
2364 }
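
/*
 * The three-write pattern used repeatedly above clocks a single bit
 * out on the Microwire interface: drive DI, raise SK, then lower SK.
 * A hypothetical helper showing just that step (illustrative sketch
 * only, not compiled; wm_eeprom_sendbit() is not part of this
 * driver):
 */
#if 0
static void
wm_eeprom_sendbit(struct wm_softc *sc, uint32_t reg, int bit)
{

	if (bit)
		reg |= EECD_DI;
	else
		reg &= ~EECD_DI;
	CSR_WRITE(sc, WMREG_EECD, reg);			/* present data bit */
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);	/* clock high */
	delay(2);
	CSR_WRITE(sc, WMREG_EECD, reg);			/* clock low */
	delay(2);
}
#endif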
2365
2366 /*
2367 * wm_add_rxbuf:
2368 *
2369 * Add a receive buffer to the indicated descriptor.
2370 */
2371 int
2372 wm_add_rxbuf(struct wm_softc *sc, int idx)
2373 {
2374 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2375 struct mbuf *m;
2376 int error;
2377
2378 MGETHDR(m, M_DONTWAIT, MT_DATA);
2379 if (m == NULL)
2380 return (ENOBUFS);
2381
2382 MCLGET(m, M_DONTWAIT);
2383 if ((m->m_flags & M_EXT) == 0) {
2384 m_freem(m);
2385 return (ENOBUFS);
2386 }
2387
2388 if (rxs->rxs_mbuf != NULL)
2389 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2390
2391 rxs->rxs_mbuf = m;
2392
2393 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
2394 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
2395 BUS_DMA_READ|BUS_DMA_NOWAIT);
2396 if (error) {
2397 printf("%s: unable to load rx DMA map %d, error = %d\n",
2398 sc->sc_dev.dv_xname, idx, error);
2399 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2400 }
2401
2402 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2403 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2404
2405 WM_INIT_RXDESC(sc, idx);
2406
2407 return (0);
2408 }
2409
2410 /*
2411 * wm_set_ral:
2412 *
2413 * Set an entry in the receive address list.
2414 */
2415 static void
2416 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2417 {
2418 uint32_t ral_lo, ral_hi;
2419
2420 if (enaddr != NULL) {
2421 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2422 (enaddr[3] << 24);
2423 ral_hi = enaddr[4] | (enaddr[5] << 8);
2424 ral_hi |= RAL_AV;
2425 } else {
2426 ral_lo = 0;
2427 ral_hi = 0;
2428 }
2429
2430 if (sc->sc_type >= WM_T_82544) {
2431 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2432 ral_lo);
2433 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2434 ral_hi);
2435 } else {
2436 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2437 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2438 }
2439 }
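
/*
 * Packing example for wm_set_ral() (illustrative address only): for
 * a station address of 00:a0:c9:12:34:56,
 *
 *	ral_lo = 0x12c9a000	(bytes 0-3, low byte first)
 *	ral_hi = 0x00005634 | RAL_AV
 */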
2440
2441 /*
2442 * wm_mchash:
2443 *
2444 * Compute the hash of the multicast address for the 4096-bit
2445 * multicast filter.
2446 */
2447 static uint32_t
2448 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2449 {
2450 static const int lo_shift[4] = { 4, 3, 2, 0 };
2451 static const int hi_shift[4] = { 4, 5, 6, 8 };
2452 uint32_t hash;
2453
2454 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2455 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2456
2457 return (hash & 0xfff);
2458 }
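
/*
 * Worked example (illustrative address only): with the default
 * filter type 0 and a multicast address ending in ...:ab:cd,
 *
 *	hash = (0xab >> 4) | (0xcd << 4) = 0xcda
 *
 * wm_set_filter() below then splits this into MTA register
 * 0xcda >> 5 = 0x66 and bit 0xcda & 0x1f = 0x1a.
 */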
2459
2460 /*
2461 * wm_set_filter:
2462 *
2463 * Set up the receive filter.
2464 */
2465 void
2466 wm_set_filter(struct wm_softc *sc)
2467 {
2468 struct ethercom *ec = &sc->sc_ethercom;
2469 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2470 struct ether_multi *enm;
2471 struct ether_multistep step;
2472 bus_addr_t mta_reg;
2473 uint32_t hash, reg, bit;
2474 int i;
2475
2476 if (sc->sc_type >= WM_T_82544)
2477 mta_reg = WMREG_CORDOVA_MTA;
2478 else
2479 mta_reg = WMREG_MTA;
2480
2481 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2482
2483 if (ifp->if_flags & IFF_BROADCAST)
2484 sc->sc_rctl |= RCTL_BAM;
2485 if (ifp->if_flags & IFF_PROMISC) {
2486 sc->sc_rctl |= RCTL_UPE;
2487 goto allmulti;
2488 }
2489
2490 /*
2491 * Set the station address in the first RAL slot, and
2492 * clear the remaining slots.
2493 */
2494 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2495 for (i = 1; i < WM_RAL_TABSIZE; i++)
2496 wm_set_ral(sc, NULL, i);
2497
2498 /* Clear out the multicast table. */
2499 for (i = 0; i < WM_MC_TABSIZE; i++)
2500 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2501
2502 ETHER_FIRST_MULTI(step, ec, enm);
2503 while (enm != NULL) {
2504 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2505 /*
2506 * We must listen to a range of multicast addresses.
2507 * For now, just accept all multicasts, rather than
2508 * trying to set only those filter bits needed to match
2509 * the range. (At this time, the only use of address
2510 * ranges is for IP multicast routing, for which the
2511 * range is big enough to require all bits set.)
2512 */
2513 goto allmulti;
2514 }
2515
2516 hash = wm_mchash(sc, enm->enm_addrlo);
2517
2518 reg = (hash >> 5) & 0x7f;
2519 bit = hash & 0x1f;
2520
2521 hash = CSR_READ(sc, mta_reg + (reg << 2));
2522 hash |= 1U << bit;
2523
2524 /* XXX Hardware bug?? */
2525 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2526 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2527 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2528 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2529 } else
2530 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2531
2532 ETHER_NEXT_MULTI(step, enm);
2533 }
2534
2535 ifp->if_flags &= ~IFF_ALLMULTI;
2536 goto setit;
2537
2538 allmulti:
2539 ifp->if_flags |= IFF_ALLMULTI;
2540 sc->sc_rctl |= RCTL_MPE;
2541
2542 setit:
2543 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2544 }
2545
2546 /*
2547 * wm_tbi_mediainit:
2548 *
2549 * Initialize media for use on 1000BASE-X devices.
2550 */
2551 void
2552 wm_tbi_mediainit(struct wm_softc *sc)
2553 {
2554 const char *sep = "";
2555
2556 if (sc->sc_type < WM_T_82543)
2557 sc->sc_tipg = TIPG_WM_DFLT;
2558 else
2559 sc->sc_tipg = TIPG_LG_DFLT;
2560
2561 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2562 wm_tbi_mediastatus);
2563
2564 /*
2565 * SWD Pins:
2566 *
2567 * 0 = Link LED (output)
2568 * 1 = Loss Of Signal (input)
2569 */
2570 sc->sc_ctrl |= CTRL_SWDPIO(0);
2571 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2572
2573 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2574
2575 #define ADD(ss, mm, dd) \
2576 do { \
2577 printf("%s%s", sep, ss); \
2578 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2579 sep = ", "; \
2580 } while (/*CONSTCOND*/0)
2581
2582 printf("%s: ", sc->sc_dev.dv_xname);
2583 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2584 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2585 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2586 printf("\n");
2587
2588 #undef ADD
2589
2590 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2591 }
2592
2593 /*
2594 * wm_tbi_mediastatus: [ifmedia interface function]
2595 *
2596 * Get the current interface media status on a 1000BASE-X device.
2597 */
2598 void
2599 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2600 {
2601 struct wm_softc *sc = ifp->if_softc;
2602
2603 ifmr->ifm_status = IFM_AVALID;
2604 ifmr->ifm_active = IFM_ETHER;
2605
2606 if (sc->sc_tbi_linkup == 0) {
2607 ifmr->ifm_active |= IFM_NONE;
2608 return;
2609 }
2610
2611 ifmr->ifm_status |= IFM_ACTIVE;
2612 ifmr->ifm_active |= IFM_1000_SX;
2613 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2614 ifmr->ifm_active |= IFM_FDX;
2615 }
2616
2617 /*
2618 * wm_tbi_mediachange: [ifmedia interface function]
2619 *
2620 * Set hardware to newly-selected media on a 1000BASE-X device.
2621 */
2622 int
2623 wm_tbi_mediachange(struct ifnet *ifp)
2624 {
2625 struct wm_softc *sc = ifp->if_softc;
2626 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2627 uint32_t status;
2628 int i;
2629
2630 sc->sc_txcw = ife->ifm_data;
2631 if (sc->sc_ctrl & CTRL_RFCE)
2632 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2633 if (sc->sc_ctrl & CTRL_TFCE)
2634 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2635 sc->sc_txcw |= TXCW_ANE;
2636
2637 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2638 delay(10000);
2639
2640 sc->sc_tbi_anstate = 0;
2641
2642 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2643 /* Have signal; wait for the link to come up. */
2644 for (i = 0; i < 50; i++) {
2645 delay(10000);
2646 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2647 break;
2648 }
2649
2650 status = CSR_READ(sc, WMREG_STATUS);
2651 if (status & STATUS_LU) {
2652 /* Link is up. */
2653 DPRINTF(WM_DEBUG_LINK,
2654 ("%s: LINK: set media -> link up %s\n",
2655 sc->sc_dev.dv_xname,
2656 (status & STATUS_FD) ? "FDX" : "HDX"));
2657 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2658 if (status & STATUS_FD)
2659 sc->sc_tctl |=
2660 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2661 else
2662 sc->sc_tctl |=
2663 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2664 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2665 sc->sc_tbi_linkup = 1;
2666 } else {
2667 /* Link is down. */
2668 DPRINTF(WM_DEBUG_LINK,
2669 ("%s: LINK: set media -> link down\n",
2670 sc->sc_dev.dv_xname));
2671 sc->sc_tbi_linkup = 0;
2672 }
2673 } else {
2674 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2675 sc->sc_dev.dv_xname));
2676 sc->sc_tbi_linkup = 0;
2677 }
2678
2679 wm_tbi_set_linkled(sc);
2680
2681 return (0);
2682 }
2683
2684 /*
2685 * wm_tbi_set_linkled:
2686 *
2687 * Update the link LED on 1000BASE-X devices.
2688 */
2689 void
2690 wm_tbi_set_linkled(struct wm_softc *sc)
2691 {
2692
2693 if (sc->sc_tbi_linkup)
2694 sc->sc_ctrl |= CTRL_SWDPIN(0);
2695 else
2696 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2697
2698 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2699 }
2700
2701 /*
2702 * wm_tbi_check_link:
2703 *
2704 * Check the link on 1000BASE-X devices.
2705 */
2706 void
2707 wm_tbi_check_link(struct wm_softc *sc)
2708 {
2709 uint32_t rxcw, ctrl, status;
2710
2711 if (sc->sc_tbi_anstate == 0)
2712 return;
2713 else if (sc->sc_tbi_anstate > 1) {
2714 DPRINTF(WM_DEBUG_LINK,
2715 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2716 sc->sc_tbi_anstate));
2717 sc->sc_tbi_anstate--;
2718 return;
2719 }
2720
2721 sc->sc_tbi_anstate = 0;
2722
2723 rxcw = CSR_READ(sc, WMREG_RXCW);
2724 ctrl = CSR_READ(sc, WMREG_CTRL);
2725 status = CSR_READ(sc, WMREG_STATUS);
2726
2727 if ((status & STATUS_LU) == 0) {
2728 DPRINTF(WM_DEBUG_LINK,
2729 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2730 sc->sc_tbi_linkup = 0;
2731 } else {
2732 DPRINTF(WM_DEBUG_LINK,
2733 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2734 (status & STATUS_FD) ? "FDX" : "HDX"));
2735 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2736 if (status & STATUS_FD)
2737 sc->sc_tctl |=
2738 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2739 else
2740 sc->sc_tctl |=
2741 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2742 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2743 sc->sc_tbi_linkup = 1;
2744 }
2745
2746 wm_tbi_set_linkled(sc);
2747 }
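
/*
 * Note on sc_tbi_anstate as used above: wm_linkintr() sets it to 2
 * when /C/ ordered sets or a link status change are seen, and
 * wm_tick() calls here once per second, so the actual link check is
 * deferred for roughly two ticks after an autonegotiation event.
 * While it is 0, the periodic check is skipped entirely until the
 * next link interrupt re-arms it.
 */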
2748
2749 /*
2750 * wm_gmii_reset:
2751 *
2752 * Reset the PHY.
2753 */
2754 void
2755 wm_gmii_reset(struct wm_softc *sc)
2756 {
2757 uint32_t reg;
2758
2759 if (sc->sc_type >= WM_T_82544) {
2760 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2761 delay(20000);
2762
2763 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2764 delay(20000);
2765 } else {
2766 /* The PHY reset pin is active-low. */
2767 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2768 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2769 CTRL_EXT_SWDPIN(4));
2770 reg |= CTRL_EXT_SWDPIO(4);
2771
2772 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2773 delay(10);
2774
2775 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2776 delay(10);
2777
2778 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2779 delay(10);
2780 #if 0
2781 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2782 #endif
2783 }
2784 }
2785
2786 /*
2787 * wm_gmii_mediainit:
2788 *
2789 * Initialize media for use on 1000BASE-T devices.
2790 */
2791 void
2792 wm_gmii_mediainit(struct wm_softc *sc)
2793 {
2794 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2795
2796 /* We have MII. */
2797 sc->sc_flags |= WM_F_HAS_MII;
2798
2799 sc->sc_tipg = TIPG_1000T_DFLT;
2800
2801 /*
2802 * Let the chip set speed/duplex on its own based on
2803 * signals from the PHY.
2804 */
2805 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2806 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2807
2808 /* Initialize our media structures and probe the GMII. */
2809 sc->sc_mii.mii_ifp = ifp;
2810
2811 if (sc->sc_type >= WM_T_82544) {
2812 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2813 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2814 } else {
2815 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2816 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2817 }
2818 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2819
2820 wm_gmii_reset(sc);
2821
2822 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2823 wm_gmii_mediastatus);
2824
2825 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2826 MII_OFFSET_ANY, 0);
2827 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2828 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2829 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2830 } else
2831 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2832 }
2833
2834 /*
2835 * wm_gmii_mediastatus: [ifmedia interface function]
2836 *
2837 * Get the current interface media status on a 1000BASE-T device.
2838 */
2839 void
2840 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2841 {
2842 struct wm_softc *sc = ifp->if_softc;
2843
2844 mii_pollstat(&sc->sc_mii);
2845 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2846 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2847 }
2848
2849 /*
2850 * wm_gmii_mediachange: [ifmedia interface function]
2851 *
2852 * Set hardware to newly-selected media on a 1000BASE-T device.
2853 */
2854 int
2855 wm_gmii_mediachange(struct ifnet *ifp)
2856 {
2857 struct wm_softc *sc = ifp->if_softc;
2858
2859 if (ifp->if_flags & IFF_UP)
2860 mii_mediachg(&sc->sc_mii);
2861 return (0);
2862 }
2863
2864 #define MDI_IO CTRL_SWDPIN(2)
2865 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2866 #define MDI_CLK CTRL_SWDPIN(3)
2867
2868 static void
2869 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2870 {
2871 uint32_t i, v;
2872
2873 v = CSR_READ(sc, WMREG_CTRL);
2874 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2875 v |= MDI_DIR | CTRL_SWDPIO(3);
2876
2877 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2878 if (data & i)
2879 v |= MDI_IO;
2880 else
2881 v &= ~MDI_IO;
2882 CSR_WRITE(sc, WMREG_CTRL, v);
2883 delay(10);
2884 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2885 delay(10);
2886 CSR_WRITE(sc, WMREG_CTRL, v);
2887 delay(10);
2888 }
2889 }
2890
2891 static uint32_t
2892 i82543_mii_recvbits(struct wm_softc *sc)
2893 {
2894 uint32_t v, i, data = 0;
2895
2896 v = CSR_READ(sc, WMREG_CTRL);
2897 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2898 v |= CTRL_SWDPIO(3);
2899
2900 CSR_WRITE(sc, WMREG_CTRL, v);
2901 delay(10);
2902 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2903 delay(10);
2904 CSR_WRITE(sc, WMREG_CTRL, v);
2905 delay(10);
2906
2907 for (i = 0; i < 16; i++) {
2908 data <<= 1;
2909 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2910 delay(10);
2911 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2912 data |= 1;
2913 CSR_WRITE(sc, WMREG_CTRL, v);
2914 delay(10);
2915 }
2916
2917 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2918 delay(10);
2919 CSR_WRITE(sc, WMREG_CTRL, v);
2920 delay(10);
2921
2922 return (data);
2923 }
2924
2925 #undef MDI_IO
2926 #undef MDI_DIR
2927 #undef MDI_CLK
2928
2929 /*
2930 * wm_gmii_i82543_readreg: [mii interface function]
2931 *
2932 * Read a PHY register on the GMII (i82543 version).
2933 */
2934 int
2935 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2936 {
2937 struct wm_softc *sc = (void *) self;
2938 int rv;
2939
2940 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2941 i82543_mii_sendbits(sc, reg | (phy << 5) |
2942 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2943 rv = i82543_mii_recvbits(sc) & 0xffff;
2944
2945 DPRINTF(WM_DEBUG_GMII,
2946 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2947 sc->sc_dev.dv_xname, phy, reg, rv));
2948
2949 return (rv);
2950 }
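
/*
 * For reference, the 14 bits shifted out above form a standard MII
 * management read frame, MSB first:
 *
 *	<01><10><phy:5><reg:5>
 *	 ST  OP  PHYAD  REGAD
 *
 * preceded by the 32-bit preamble of ones and followed by the
 * turnaround and the 16 data bits clocked in by
 * i82543_mii_recvbits().
 */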
2951
2952 /*
2953 * wm_gmii_i82543_writereg: [mii interface function]
2954 *
2955 * Write a PHY register on the GMII (i82543 version).
2956 */
2957 void
2958 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2959 {
2960 struct wm_softc *sc = (void *) self;
2961
2962 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2963 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2964 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2965 (MII_COMMAND_START << 30), 32);
2966 }
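
/*
 * Likewise, the 32 bits shifted out above form a management write
 * frame, MSB first:
 *
 *	<01><01><phy:5><reg:5><10><data:16>
 *	 ST  OP  PHYAD  REGAD  TA
 *
 * where the <10> turnaround pattern comes from MII_COMMAND_ACK.
 */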
2967
2968 /*
2969 * wm_gmii_i82544_readreg: [mii interface function]
2970 *
2971 * Read a PHY register on the GMII.
2972 */
2973 int
2974 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2975 {
2976 struct wm_softc *sc = (void *) self;
2977 uint32_t mdic;
2978 int i, rv;
2979
2980 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2981 MDIC_REGADD(reg));
2982
2983 for (i = 0; i < 100; i++) {
2984 mdic = CSR_READ(sc, WMREG_MDIC);
2985 if (mdic & MDIC_READY)
2986 break;
2987 delay(10);
2988 }
2989
2990 if ((mdic & MDIC_READY) == 0) {
2991 printf("%s: MDIC read timed out: phy %d reg %d\n",
2992 sc->sc_dev.dv_xname, phy, reg);
2993 rv = 0;
2994 } else if (mdic & MDIC_E) {
2995 #if 0 /* This is normal if no PHY is present. */
2996 printf("%s: MDIC read error: phy %d reg %d\n",
2997 sc->sc_dev.dv_xname, phy, reg);
2998 #endif
2999 rv = 0;
3000 } else {
3001 rv = MDIC_DATA(mdic);
3002 if (rv == 0xffff)
3003 rv = 0;
3004 }
3005
3006 return (rv);
3007 }
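
/*
 * The polling loop above gives an MDIC transaction roughly
 * 100 * 10us = 1ms to complete before declaring a timeout; the same
 * budget is used by wm_gmii_i82544_writereg() below.
 */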
3008
3009 /*
3010 * wm_gmii_i82544_writereg: [mii interface function]
3011 *
3012 * Write a PHY register on the GMII.
3013 */
3014 void
3015 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
3016 {
3017 struct wm_softc *sc = (void *) self;
3018 uint32_t mdic;
3019 int i;
3020
3021 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
3022 MDIC_REGADD(reg) | MDIC_DATA(val));
3023
3024 for (i = 0; i < 100; i++) {
3025 mdic = CSR_READ(sc, WMREG_MDIC);
3026 if (mdic & MDIC_READY)
3027 break;
3028 delay(10);
3029 }
3030
3031 if ((mdic & MDIC_READY) == 0)
3032 printf("%s: MDIC write timed out: phy %d reg %d\n",
3033 sc->sc_dev.dv_xname, phy, reg);
3034 else if (mdic & MDIC_E)
3035 printf("%s: MDIC write error: phy %d reg %d\n",
3036 sc->sc_dev.dv_xname, phy, reg);
3037 }
3038
3039 /*
3040 * wm_gmii_statchg: [mii interface function]
3041 *
3042 * Callback from MII layer when media changes.
3043 */
3044 void
3045 wm_gmii_statchg(struct device *self)
3046 {
3047 struct wm_softc *sc = (void *) self;
3048
3049 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3050
3051 if (sc->sc_mii.mii_media_active & IFM_FDX) {
3052 DPRINTF(WM_DEBUG_LINK,
3053 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
3054 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3055 } else {
3056 DPRINTF(WM_DEBUG_LINK,
3057 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
3058 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3059 }
3060
3061 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3062 }
3063