/*	$NetBSD: if_wm.c,v 1.37 2003/04/29 01:15:38 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
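
/*
 * Note the calling convention: the second argument to DPRINTF()
 * carries its own parentheses, e.g.
 *
 *	DPRINTF(WM_DEBUG_TX, ("%s: TX: ...\n", sc->sc_dev.dv_xname));
 *
 * so the entire printf argument list disappears when WM_DEBUG is
 * not defined.
 */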
111
112 /*
113 * Transmit descriptor list size. Due to errata, we can only have
114 * 256 hardware descriptors in the ring. We tell the upper layers
115 * that they can queue a lot of packets, and we go ahead and manage
116 * up to 64 of them at a time. We allow up to 16 DMA segments per
117 * packet.
118 */
119 #define WM_NTXSEGS 16
120 #define WM_IFQUEUELEN 256
121 #define WM_TXQUEUELEN 64
122 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
123 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
124 #define WM_NTXDESC 256
125 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
126 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
127 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
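
/*
 * Both ring sizes are powers of two, so the "next index" macros can
 * wrap with a cheap AND of the corresponding mask instead of a
 * modulus; e.g. WM_NEXTTX(255) == (256 & WM_NTXDESC_MASK) == 0.
 */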

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc)					\
do {								\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;			\
	*(sc)->sc_rxtailp = NULL;				\
	(sc)->sc_rxlen = 0;					\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)					\
do {								\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);		\
	(sc)->sc_rxtailp = &(m)->m_next;			\
} while (/*CONSTCOND*/0)
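
/*
 * sc_rxtailp always points at the m_next field of the last mbuf in
 * the Rx chain (or at sc_rxhead when the chain is empty), so
 * WM_RXCHAIN_LINK() appends a buffer in constant time without
 * walking the chain; wm_rxintr() NULL-terminates the chain through
 * sc_rxtailp once it sees EOP.
 */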

/* sc_type */
#define	WM_T_82542_2_0	0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1	1	/* i82542 2.1+ (old) */
#define	WM_T_82543	2	/* i82543 */
#define	WM_T_82544	3	/* i82544 */
#define	WM_T_82540	4	/* i82540 */
#define	WM_T_82545	5	/* i82545 */
#define	WM_T_82546	6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)					\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)					\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)				\
do {								\
	int __x, __n;						\
								\
	__x = (x);						\
	__n = (n);						\
								\
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *	\
		    (WM_NTXDESC - __x), (ops));			\
		__n -= (WM_NTXDESC - __x);			\
		__x = 0;					\
	}							\
								\
	/* Now sync whatever is left. */			\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
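
/*
 * Example: with WM_NTXDESC == 256, WM_CDTXSYNC(sc, 254, 4, ops)
 * issues two bus_dmamap_sync() calls, one covering descriptors
 * 254-255 and one covering descriptors 0-1, since the descriptor
 * memory is contiguous but the ring indices wrap.
 */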

#define	WM_CDRXSYNC(sc, x, ops)					\
do {								\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));	\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)					\
do {								\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];	\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];	\
	struct mbuf *__m = __rxs->rxs_mbuf;			\
								\
	/*							\
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary.				\
	 *							\
	 * XXX BRAINDAMAGE ALERT!				\
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't accept packets longer than the standard \
	 * Ethernet MTU, without incurring a big penalty to copy every \
	 * incoming packet to a new, suitably aligned buffer.	\
	 *							\
	 * We'll need to make some changes to the layer 3/4 parts of \
	 * the stack (to copy the headers to a new buffer if not \
	 * aligned) in order to support large MTU on this chip.  Lame. \
	 */							\
	__m->m_data = __m->m_ext.ext_buf + 2;			\
								\
	__rxd->wrx_addr.wa_low =				\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);	\
	__rxd->wrx_addr.wa_high = 0;				\
	__rxd->wrx_len = 0;					\
	__rxd->wrx_cksum = 0;					\
	__rxd->wrx_status = 0;					\
	__rxd->wrx_errors = 0;					\
	__rxd->wrx_special = 0;					\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
								\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));			\
} while (/*CONSTCOND*/0)
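
/*
 * The final CSR_WRITE() above advances the Rx Descriptor Tail (RDT)
 * register to this slot, which is what hands the re-initialized
 * descriptor back to the chip.
 */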

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			aprint_error("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		aprint_error("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			aprint_error("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			aprint_normal("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error("%s: unable to map interrupt\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}
	aprint_normal("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		aprint_error(
		    "%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
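
	/*
	 * The unpacking above assumes each 16-bit EEPROM word holds two
	 * address bytes, low byte first, i.e. the station address is
	 * stored in the EEPROM in little-endian byte order.
	 */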

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			aprint_error("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			aprint_error("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound and
	 * out-bound.  Only on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		aprint_error("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	if (m0->m_len < (offset + iphl)) {
		if ((txs->txs_mbuf = m_pullup(m0, offset + iphl)) == NULL) {
			printf("%s: wm_tx_cksum: mbuf allocation failed, "
			    "packet dropped\n", sc->sc_dev.dv_xname);
			return (ENOMEM);
		}
		m0 = txs->txs_mbuf;
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */
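
	/*
	 * Reminder: sc_txctx_ipcs and sc_txctx_tucs are set to
	 * 0xffffffff by wm_init() to mark the cached context invalid;
	 * that is the sentinel tested below.
	 */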

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
1224 * layer that there are no more slots left.
1225 */
1226 DPRINTF(WM_DEBUG_TX,
1227 ("%s: TX: need %d descriptors, have %d\n",
1228 sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1229 sc->sc_txfree - 1));
1230 ifp->if_flags |= IFF_OACTIVE;
1231 bus_dmamap_unload(sc->sc_dmat, dmamap);
1232 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1233 break;
1234 }
1235
1236 IFQ_DEQUEUE(&ifp->if_snd, m0);
1237
1238 /*
1239 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1240 */
1241
1242 /* Sync the DMA map. */
1243 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1244 BUS_DMASYNC_PREWRITE);
1245
1246 DPRINTF(WM_DEBUG_TX,
1247 ("%s: TX: packet has %d DMA segments\n",
1248 sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1249
1250 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1251
1252 /*
1253 * Store a pointer to the packet so that we can free it
1254 * later.
1255 *
1256 * Initially, we consider the number of descriptors the
1257 * packet uses the number of DMA segments. This may be
1258 * incremented by 1 if we do checksum offload (a descriptor
1259 * is used to set the checksum context).
1260 */
1261 txs->txs_mbuf = m0;
1262 txs->txs_firstdesc = sc->sc_txnext;
1263 txs->txs_ndesc = dmamap->dm_nsegs;
1264
1265 /*
1266 * Set up checksum offload parameters for
1267 * this packet.
1268 */
1269 if (m0->m_pkthdr.csum_flags &
1270 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1271 if (wm_tx_cksum(sc, txs, &cksumcmd,
1272 &cksumfields) != 0) {
1273 /* Error message already displayed. */
1274 bus_dmamap_unload(sc->sc_dmat, dmamap);
1275 continue;
1276 }
1277 } else {
1278 cksumcmd = 0;
1279 cksumfields = 0;
1280 }
1281
1282 cksumcmd |= htole32(WTX_CMD_IDE);
1283
1284 /*
1285 * Initialize the transmit descriptor.
1286 */
1287 for (nexttx = sc->sc_txnext, seg = 0;
1288 seg < dmamap->dm_nsegs;
1289 seg++, nexttx = WM_NEXTTX(nexttx)) {
1290 /*
1291 * Note: we currently only use 32-bit DMA
1292 * addresses.
1293 */
1294 sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
1295 sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1296 htole32(dmamap->dm_segs[seg].ds_addr);
1297 sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1298 htole32(dmamap->dm_segs[seg].ds_len);
1299 sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1300 cksumfields;
1301 lasttx = nexttx;
1302
1303 DPRINTF(WM_DEBUG_TX,
1304 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1305 sc->sc_dev.dv_xname, nexttx,
1306 (uint32_t) dmamap->dm_segs[seg].ds_addr,
1307 (uint32_t) dmamap->dm_segs[seg].ds_len));
1308 }
1309
1310 /*
1311 * Set up the command byte on the last descriptor of
1312 * the packet. If we're in the interrupt delay window,
1313 * delay the interrupt.
1314 */
1315 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1316 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1317
1318 #if 0 /* XXXJRT */
1319 /*
1320 * If VLANs are enabled and the packet has a VLAN tag, set
1321 * up the descriptor to encapsulate the packet for us.
1322 *
1323 * This is only valid on the last descriptor of the packet.
1324 */
1325 if (sc->sc_ethercom.ec_nvlans != 0 &&
1326 (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
1327 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1328 htole32(WTX_CMD_VLE);
1329 sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1330 = htole16(*(u_int *)(mtag + 1) & 0xffff);
1331 }
1332 #endif /* XXXJRT */
1333
1334 txs->txs_lastdesc = lasttx;
1335
1336 DPRINTF(WM_DEBUG_TX,
1337 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1338 lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1339
1340 /* Sync the descriptors we're using. */
1341 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1342 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1343
1344 /* Give the packet to the chip. */
1345 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1346
1347 DPRINTF(WM_DEBUG_TX,
1348 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1349
1350 DPRINTF(WM_DEBUG_TX,
1351 ("%s: TX: finished transmitting packet, job %d\n",
1352 sc->sc_dev.dv_xname, sc->sc_txsnext));
1353
1354 /* Advance the tx pointer. */
1355 sc->sc_txfree -= txs->txs_ndesc;
1356 sc->sc_txnext = nexttx;
1357
1358 sc->sc_txsfree--;
1359 sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1360
1361 #if NBPFILTER > 0
1362 /* Pass the packet to any BPF listeners. */
1363 if (ifp->if_bpf)
1364 bpf_mtap(ifp->if_bpf, m0);
1365 #endif /* NBPFILTER > 0 */
1366 }
1367
1368 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1369 /* No more slots; notify upper layer. */
1370 ifp->if_flags |= IFF_OACTIVE;
1371 }
1372
1373 if (sc->sc_txfree != ofree) {
1374 /* Set a watchdog timer in case the chip flakes out. */
1375 ifp->if_timer = 5;
1376 }
1377 }
1378
1379 /*
1380 * wm_watchdog: [ifnet interface function]
1381 *
1382 * Watchdog timer handler.
1383 */
1384 void
1385 wm_watchdog(struct ifnet *ifp)
1386 {
1387 struct wm_softc *sc = ifp->if_softc;
1388
1389 /*
1390 * Since we're using delayed interrupts, sweep up
1391 * before we report an error.
1392 */
1393 wm_txintr(sc);
1394
1395 if (sc->sc_txfree != WM_NTXDESC) {
1396 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1397 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1398 sc->sc_txnext);
1399 ifp->if_oerrors++;
1400
1401 /* Reset the interface. */
1402 (void) wm_init(ifp);
1403 }
1404
1405 /* Try to get more packets going. */
1406 wm_start(ifp);
1407 }
1408
1409 /*
1410 * wm_ioctl: [ifnet interface function]
1411 *
1412 * Handle control requests from the operator.
1413 */
1414 int
1415 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1416 {
1417 struct wm_softc *sc = ifp->if_softc;
1418 struct ifreq *ifr = (struct ifreq *) data;
1419 int s, error;
1420
1421 s = splnet();
1422
1423 switch (cmd) {
1424 case SIOCSIFMEDIA:
1425 case SIOCGIFMEDIA:
1426 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1427 break;
1428
1429 default:
1430 error = ether_ioctl(ifp, cmd, data);
1431 if (error == ENETRESET) {
1432 /*
1433 * Multicast list has changed; set the hardware filter
1434 * accordingly.
1435 */
1436 wm_set_filter(sc);
1437 error = 0;
1438 }
1439 break;
1440 }
1441
1442 /* Try to get more packets going. */
1443 wm_start(ifp);
1444
1445 splx(s);
1446 return (error);
1447 }
1448
1449 /*
1450 * wm_intr:
1451 *
1452 * Interrupt service routine.
1453 */
1454 int
1455 wm_intr(void *arg)
1456 {
1457 struct wm_softc *sc = arg;
1458 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1459 uint32_t icr;
1460 int wantinit, handled = 0;
1461
1462 for (wantinit = 0; wantinit == 0;) {
1463 icr = CSR_READ(sc, WMREG_ICR);
1464 if ((icr & sc->sc_icr) == 0)
1465 break;
1466
1467 #if 0 /*NRND > 0*/
1468 if (RND_ENABLED(&sc->rnd_source))
1469 rnd_add_uint32(&sc->rnd_source, icr);
1470 #endif
1471
1472 handled = 1;
1473
1474 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1475 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1476 DPRINTF(WM_DEBUG_RX,
1477 ("%s: RX: got Rx intr 0x%08x\n",
1478 sc->sc_dev.dv_xname,
1479 icr & (ICR_RXDMT0|ICR_RXT0)));
1480 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1481 }
1482 #endif
1483 wm_rxintr(sc);
1484
1485 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
1486 if (icr & ICR_TXDW) {
1487 DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
1832 sc->sc_dev.dv_xname));
1833 }
1834 return;
1835 }
1836
1837 /*
1838 * If we are now receiving /C/, check for link again in
1839 * a couple of link clock ticks.
1840 */
1841 if (icr & ICR_RXCFG) {
1842 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1843 sc->sc_dev.dv_xname));
1844 sc->sc_tbi_anstate = 2;
1845 }
1846
1847 if (icr & ICR_LSC) {
1848 status = CSR_READ(sc, WMREG_STATUS);
1849 if (status & STATUS_LU) {
1850 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1851 sc->sc_dev.dv_xname,
1852 (status & STATUS_FD) ? "FDX" : "HDX"));
1853 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1854 if (status & STATUS_FD)
1855 sc->sc_tctl |=
1856 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1857 else
1858 sc->sc_tctl |=
1859 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1860 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1861 sc->sc_tbi_linkup = 1;
1862 } else {
1863 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1864 sc->sc_dev.dv_xname));
1865 sc->sc_tbi_linkup = 0;
1866 }
1867 sc->sc_tbi_anstate = 2;
1868 wm_tbi_set_linkled(sc);
1869 } else if (icr & ICR_RXSEQ) {
1870 DPRINTF(WM_DEBUG_LINK,
1871 ("%s: LINK: Receive sequence error\n",
1872 sc->sc_dev.dv_xname));
1873 }
1874 }
1875
1876 /*
1877 * wm_tick:
1878 *
1879 * One second timer, used to check link status, sweep up
1880 * completed transmit jobs, etc.
1881 */
1882 void
1883 wm_tick(void *arg)
1884 {
1885 struct wm_softc *sc = arg;
1886 int s;
1887
1888 s = splnet();
1889
1890 if (sc->sc_flags & WM_F_HAS_MII)
1891 mii_tick(&sc->sc_mii);
1892 else
1893 wm_tbi_check_link(sc);
1894
1895 splx(s);
1896
1897 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1898 }
1899
1900 /*
1901 * wm_reset:
1902 *
1903 * Reset the i82542 chip.
1904 */
1905 void
1906 wm_reset(struct wm_softc *sc)
1907 {
1908 int i;
1909
1910 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1911 delay(10000);
1912
1913 for (i = 0; i < 1000; i++) {
1914 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1915 return;
1916 delay(20);
1917 }
1918
1919 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1920 printf("%s: WARNING: reset failed to complete\n",
1921 sc->sc_dev.dv_xname);
1922 }
1923
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done! */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}

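/*
 * Note that sc_icr, programmed into IMS above, doubles as the
 * software-side mask the interrupt handler checks against.  A sketch
 * of the expected gating in the handler (the real wm_intr() is
 * earlier in this file, so its exact shape is assumed here):
 */
#if 0
	uint32_t icr;

	icr = CSR_READ(sc, WMREG_ICR);		/* ICR is read-to-clear */
	if ((icr & sc->sc_icr) == 0)
		return (0);			/* not our interrupt */
#endif
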
/*
 * wm_rxdrain:
 *
 *	Drain the receive queue.
 */
void
wm_rxdrain(struct wm_softc *sc)
{
	struct wm_rxsoft *rxs;
	int i;

	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * wm_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.
 */
void
wm_stop(struct ifnet *ifp, int disable)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_txsoft *txs;
	int i;

	/* Stop the one second clock. */
	callout_stop(&sc->sc_tick_ch);

	if (sc->sc_flags & WM_F_HAS_MII) {
		/* Down the MII. */
		mii_down(&sc->sc_mii);
	}

	/* Stop the transmit and receive processes. */
	CSR_WRITE(sc, WMREG_TCTL, 0);
	CSR_WRITE(sc, WMREG_RCTL, 0);

	/* Release any queued transmit buffers. */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		txs = &sc->sc_txsoft[i];
		if (txs->txs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
			m_freem(txs->txs_mbuf);
			txs->txs_mbuf = NULL;
		}
	}

	if (disable)
		wm_rxdrain(sc);

	/* Mark the interface as down and cancel the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * wm_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
void
wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
{
	uint32_t reg;
	int i, x, addrbits = 6;

	for (i = 0; i < wordcnt; i++) {
		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			reg = CSR_READ(sc, WMREG_EECD);

			/* Get number of address bits. */
			if (reg & EECD_EE_SIZE)
				addrbits = 8;

			/* Request EEPROM access. */
			reg |= EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
			/* ...and wait for it to be granted. */
			for (x = 0; x < 100; x++) {
				reg = CSR_READ(sc, WMREG_EECD);
				if (reg & EECD_EE_GNT)
					break;
				delay(5);
			}
			if ((reg & EECD_EE_GNT) == 0) {
				printf("%s: could not acquire EEPROM GNT\n",
				    sc->sc_dev.dv_xname);
				*data = 0xffff;
				reg &= ~EECD_EE_REQ;
				CSR_WRITE(sc, WMREG_EECD, reg);
				continue;
			}
		} else
			reg = 0;

		/* Clear SK and DI. */
		reg &= ~(EECD_SK | EECD_DI);
		CSR_WRITE(sc, WMREG_EECD, reg);

		/* Set CHIP SELECT. */
		reg |= EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		/* Shift in the READ command. */
		for (x = 3; x > 0; x--) {
			if (UWIRE_OPC_READ & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift in address. */
		for (x = addrbits; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EECD_DI;
			else
				reg &= ~EECD_DI;
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Shift out the data. */
		reg &= ~EECD_DI;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
			delay(2);
			if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE(sc, WMREG_EECD, reg);
			delay(2);
		}

		/* Clear CHIP SELECT. */
		reg &= ~EECD_CS;
		CSR_WRITE(sc, WMREG_EECD, reg);
		delay(2);

		if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
			/* Release the EEPROM. */
			reg &= ~EECD_EE_REQ;
			CSR_WRITE(sc, WMREG_EECD, reg);
		}
	}
}

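/*
 * Worked example of the Microwire exchange above, reading word 0 of
 * a 6-address-bit part: with CS high, the 3 opcode bits of
 * UWIRE_OPC_READ go out MSB-first on DI, then the 6 address bits
 * MSB-first, and the part answers with 16 data bits on DO, one per
 * SK rising edge, MSB-first.  The outbound frame could equally be
 * assembled up front (illustrative sketch only; the loops above do
 * the equivalent one bit at a time):
 */
#if 0
	uint32_t frame;

	/* 3 opcode bits, then 6 address bits, for a 64x16 part. */
	frame = (UWIRE_OPC_READ << 6) | (word & 0x3f);
	/* ...clock out the low 9 bits MSB-first, then read 16 bits. */
#endif
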
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf");	/* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	WM_INIT_RXDESC(sc, idx);

	return (0);
}

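/*
 * wm_add_rxbuf() is used both by wm_init() above to populate the ring
 * and by the receive path to replace an mbuf just handed up the
 * stack.  A minimal caller sketch for the refill case (illustrative;
 * the real receive loop is earlier in this file):
 */
#if 0
	if (wm_add_rxbuf(sc, idx) != 0) {
		/* Out of mbufs; recycle the old buffer in place. */
		WM_INIT_RXDESC(sc, idx);
	}
#endif
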
/*
 * wm_set_ral:
 *
 *	Set an entry in the receive address list.
 */
static void
wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
{
	uint32_t ral_lo, ral_hi;

	if (enaddr != NULL) {
		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
		    (enaddr[3] << 24);
		ral_hi = enaddr[4] | (enaddr[5] << 8);
		ral_hi |= RAL_AV;
	} else {
		ral_lo = 0;
		ral_hi = 0;
	}

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
		    ral_hi);
	} else {
		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
	}
}

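/*
 * Worked example of the packing above: for a station address of
 * 00:a0:c9:12:34:56 (arbitrary), the register values come out as
 *
 *	ral_lo = 0x12c9a000		(bytes 0-3, byte 0 least
 *					 significant)
 *	ral_hi = 0x00005634 | RAL_AV	(bytes 4-5, plus Address Valid)
 */
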
/*
 * wm_mchash:
 *
 *	Compute the hash of the multicast address for the 4096-bit
 *	multicast filter.
 */
static uint32_t
wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
{
	static const int lo_shift[4] = { 4, 3, 2, 0 };
	static const int hi_shift[4] = { 4, 5, 6, 8 };
	uint32_t hash;

	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);

	return (hash & 0xfff);
}

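/*
 * Worked example for the default filter (sc_mchash_type == 0), where
 * lo_shift is 4 and hi_shift is 4: for the IPv4 all-hosts group
 * address 01:00:5e:00:00:01,
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * wm_set_filter() below then splits this into MTA word
 * (hash >> 5) & 0x7f = 0 and bit hash & 0x1f = 16.
 */
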
/*
 * wm_set_filter:
 *
 *	Set up the receive filter.
 */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}

/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

#define	ADD(ss, mm, dd)							\
do {									\
	printf("%s%s", sep, ss);					\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

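/*
 * With the ADD() invocations above, the attach-time console line for
 * a fiber board comes out as (unit number assumed):
 *
 *	wm0: 1000baseSX, 1000baseSX-FDX, auto
 */
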
/*
 * wm_tbi_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-X device.
 */
void
wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (sc->sc_tbi_linkup == 0) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_1000_SX;
	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
		ifmr->ifm_active |= IFM_FDX;
}

/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}

/*
 * wm_tbi_set_linkled:
 *
 *	Update the link LED on 1000BASE-X devices.
 */
void
wm_tbi_set_linkled(struct wm_softc *sc)
{

	if (sc->sc_tbi_linkup)
		sc->sc_ctrl |= CTRL_SWDPIN(0);
	else
		sc->sc_ctrl &= ~CTRL_SWDPIN(0);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
}

/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}

/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}

/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}

/*
 * wm_gmii_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status on a 1000BASE-T device.
 */
void
wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct wm_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}

/*
 * wm_gmii_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-T device.
 */
int
wm_gmii_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		mii_mediachg(&sc->sc_mii);
	return (0);
}

#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}

static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK

/*
 * wm_gmii_i82543_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII (i82543 version).
 */
int
wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	int rv;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, reg | (phy << 5) |
	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
	rv = i82543_mii_recvbits(sc) & 0xffff;

	DPRINTF(WM_DEBUG_GMII,
	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
	    sc->sc_dev.dv_xname, phy, reg, rv));

	return (rv);
}

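/*
 * The 14 bits shifted out above are the head of a standard IEEE 802.3
 * clause 22 read frame, sent MSB-first after a 32-bit preamble of
 * ones: start bits in 13:12, the read opcode in 11:10, the 5-bit PHY
 * address in 9:5, and the 5-bit register number in 4:0; the PHY then
 * supplies the 16 data bits collected by i82543_mii_recvbits().
 */
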
/*
 * wm_gmii_i82543_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII (i82543 version).
 */
void
wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;

	i82543_mii_sendbits(sc, 0xffffffffU, 32);
	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
	    (MII_COMMAND_START << 30), 32);
}

/*
 * wm_gmii_i82544_readreg:	[mii interface function]
 *
 *	Read a PHY register on the GMII.
 */
int
wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}

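/*
 * These MDIC accessors are installed as the mii_readreg/mii_writereg
 * hooks in wm_gmii_mediainit() above; the MII layer calls them during
 * mii_attach().  A direct-call sketch reading the identifier from the
 * PHY at address 1 (illustrative only):
 */
#if 0
	uint32_t phyid;

	phyid = (wm_gmii_i82544_readreg(&sc->sc_dev, 1, MII_PHYIDR1) << 16) |
	    wm_gmii_i82544_readreg(&sc->sc_dev, 1, MII_PHYIDR2);
#endif
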
/*
 * wm_gmii_i82544_writereg:	[mii interface function]
 *
 *	Write a PHY register on the GMII.
 */
void
wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}

/*
 * wm_gmii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
void
wm_gmii_statchg(struct device *self)
{
	struct wm_softc *sc = (void *) self;

	sc->sc_tctl &= ~TCTL_COLD(0x3ff);

	if (sc->sc_mii.mii_media_active & IFM_FDX) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	} else {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
	}

	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
}
