if_wm.c revision 1.21 1 /* $NetBSD: if_wm.c,v 1.21 2002/08/23 07:45:39 itojun Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
40 *
41 * TODO (in order of importance):
42 *
43 * - Make GMII work on the i82543.
44 *
45 * - Fix hw VLAN assist.
46 *
47 * - Jumbo frames -- requires changes to network stack due to
48 * lame buffer length handling on chip.
49 */
50
51 #include "bpfilter.h"
52 #include "rnd.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/callout.h>
57 #include <sys/mbuf.h>
58 #include <sys/malloc.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/device.h>
64 #include <sys/queue.h>
65
66 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
67
68 #if NRND > 0
69 #include <sys/rnd.h>
70 #endif
71
72 #include <net/if.h>
73 #include <net/if_dl.h>
74 #include <net/if_media.h>
75 #include <net/if_ether.h>
76
77 #if NBPFILTER > 0
78 #include <net/bpf.h>
79 #endif
80
81 #include <netinet/in.h> /* XXX for struct ip */
82 #include <netinet/in_systm.h> /* XXX for struct ip */
83 #include <netinet/ip.h> /* XXX for struct ip */
84 #include <netinet/tcp.h> /* XXX for struct tcphdr */
85
86 #include <machine/bus.h>
87 #include <machine/intr.h>
88 #include <machine/endian.h>
89
90 #include <dev/mii/mii.h>
91 #include <dev/mii/miivar.h>
92 #include <dev/mii/mii_bitbang.h>
93
94 #include <dev/pci/pcireg.h>
95 #include <dev/pci/pcivar.h>
96 #include <dev/pci/pcidevs.h>
97
98 #include <dev/pci/if_wmreg.h>
99
100 #ifdef WM_DEBUG
101 #define WM_DEBUG_LINK 0x01
102 #define WM_DEBUG_TX 0x02
103 #define WM_DEBUG_RX 0x04
104 #define WM_DEBUG_GMII 0x08
105 int wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;
106
107 #define DPRINTF(x, y) if (wm_debug & (x)) printf y
108 #else
109 #define DPRINTF(x, y) /* nothing */
110 #endif /* WM_DEBUG */
111
112 /*
113 * Transmit descriptor list size. Due to errata, we can only have
114 * 256 hardware descriptors in the ring. We tell the upper layers
115 * that they can queue a lot of packets, and we go ahead and manage
116 * up to 64 of them at a time. We allow up to 16 DMA segments per
117 * packet.
118 */
119 #define WM_NTXSEGS 16
120 #define WM_IFQUEUELEN 256
121 #define WM_TXQUEUELEN 64
122 #define WM_TXQUEUELEN_MASK (WM_TXQUEUELEN - 1)
123 #define WM_TXQUEUE_GC (WM_TXQUEUELEN / 8)
124 #define WM_NTXDESC 256
125 #define WM_NTXDESC_MASK (WM_NTXDESC - 1)
126 #define WM_NEXTTX(x) (((x) + 1) & WM_NTXDESC_MASK)
127 #define WM_NEXTTXS(x) (((x) + 1) & WM_TXQUEUELEN_MASK)
128
129 /*
130 * Receive descriptor list size. We have one Rx buffer for normal
131 * sized packets. Jumbo packets consume 5 Rx buffers for a full-sized
132 * packet. We allocate 256 receive descriptors, each with a 2k
133 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
134 */
135 #define WM_NRXDESC 256
136 #define WM_NRXDESC_MASK (WM_NRXDESC - 1)
137 #define WM_NEXTRX(x) (((x) + 1) & WM_NRXDESC_MASK)
138 #define WM_PREVRX(x) (((x) - 1) & WM_NRXDESC_MASK)
139
140 /*
141 * Control structures are DMA'd to the i82542 chip. We allocate them in
142 * a single clump that maps to a single DMA segment to make serveral things
143 * easier.
144 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 *
	 * Note: member order matters; the WM_CDTXOFF()/WM_CDRXOFF()
	 * macros compute device-visible offsets with offsetof() into
	 * this structure.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};
156
157 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
158 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
159 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
160
161 /*
162 * Software state for transmit jobs.
163 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
	/*
	 * NOTE(review): the descriptor fields appear to be indices into
	 * the hardware Tx ring (sc_txdescs) -- confirm in wm_start /
	 * wm_txintr, which are outside this chunk.
	 */
};
171
172 /*
173 * Software state for receive buffers. Each descriptor gets a
174 * 2k (MCLBYTES) buffer and a DMA map. For packets which fill
175 * more than one buffer, we chain them together.
176 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
181
182 /*
183 * Software state per device.
184 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define sc_txdescs sc_control_data->wcd_txdescs
#define sc_rxdescs sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* NOTE(review): presumably flags that
					   the rest of the current Rx packet is
					   being discarded -- confirm in
					   wm_rxintr */
	int sc_rxlen;			/* Rx chain length state; zeroed by
					   WM_RXCHAIN_RESET() */
	struct mbuf *sc_rxhead;		/* head of in-progress Rx mbuf chain */
	struct mbuf *sc_rxtail;		/* last mbuf linked onto the chain */
	struct mbuf **sc_rxtailp;	/* where the next mbuf gets linked
					   (see WM_RXCHAIN_LINK()) */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};
283
/*
 * Helpers for the chain of mbufs being assembled for a received packet
 * that spans multiple 2K Rx buffers.  sc_rxtailp always points at the
 * location (sc_rxhead, or the previous mbuf's m_next) where the next
 * buffer should be linked in.
 */
#define WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
296
297 /* sc_type */
298 #define WM_T_82542_2_0 0 /* i82542 2.0 (really old) */
299 #define WM_T_82542_2_1 1 /* i82542 2.1+ (old) */
300 #define WM_T_82543 2 /* i82543 */
301 #define WM_T_82544 3 /* i82544 */
302 #define WM_T_82540 4 /* i82540 */
303 #define WM_T_82545 5 /* i82545 */
304 #define WM_T_82546 6 /* i82546 */
305
306 /* sc_flags */
307 #define WM_F_HAS_MII 0x01 /* has MII */
308 #define WM_F_EEPROM_HANDSHAKE 0x02 /* requires EEPROM handshake */
309
310 #ifdef WM_EVENT_COUNTERS
311 #define WM_EVCNT_INCR(ev) (ev)->ev_count++
312 #else
313 #define WM_EVCNT_INCR(ev) /* nothing */
314 #endif
315
316 #define CSR_READ(sc, reg) \
317 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
318 #define CSR_WRITE(sc, reg, val) \
319 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
320
321 #define WM_CDTXADDR(sc, x) ((sc)->sc_cddma + WM_CDTXOFF((x)))
322 #define WM_CDRXADDR(sc, x) ((sc)->sc_cddma + WM_CDRXOFF((x)))
323
/*
 * WM_CDTXSYNC(sc, x, n, ops):
 *
 *	bus_dmamap_sync() the n Tx descriptors starting at ring index x.
 *	The range may wrap past the end of the ring, in which case the
 *	sync is split into two calls (end of ring, then from index 0).
 */
#define WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
344
/*
 * WM_CDRXSYNC(sc, x, ops):
 *
 *	bus_dmamap_sync() the single Rx descriptor at ring index x.
 */
#define WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
350
/*
 * WM_INIT_RXDESC(sc, x):
 *
 *	(Re)initialize Rx descriptor x: point it at its mbuf cluster
 *	(offset by 2 bytes so the IP header lands on a 4-byte boundary),
 *	clear the status/length fields, sync the descriptor, and hand it
 *	to the chip by writing the ring index to the RDT register.
 */
#define WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
388
389 void wm_start(struct ifnet *);
390 void wm_watchdog(struct ifnet *);
391 int wm_ioctl(struct ifnet *, u_long, caddr_t);
392 int wm_init(struct ifnet *);
393 void wm_stop(struct ifnet *, int);
394
395 void wm_shutdown(void *);
396
397 void wm_reset(struct wm_softc *);
398 void wm_rxdrain(struct wm_softc *);
399 int wm_add_rxbuf(struct wm_softc *, int);
400 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
401 void wm_tick(void *);
402
403 void wm_set_filter(struct wm_softc *);
404
405 int wm_intr(void *);
406 void wm_txintr(struct wm_softc *);
407 void wm_rxintr(struct wm_softc *);
408 void wm_linkintr(struct wm_softc *, uint32_t);
409
410 void wm_tbi_mediainit(struct wm_softc *);
411 int wm_tbi_mediachange(struct ifnet *);
412 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
413
414 void wm_tbi_set_linkled(struct wm_softc *);
415 void wm_tbi_check_link(struct wm_softc *);
416
417 void wm_gmii_reset(struct wm_softc *);
418
419 int wm_gmii_i82543_readreg(struct device *, int, int);
420 void wm_gmii_i82543_writereg(struct device *, int, int, int);
421
422 int wm_gmii_i82544_readreg(struct device *, int, int);
423 void wm_gmii_i82544_writereg(struct device *, int, int, int);
424
425 void wm_gmii_statchg(struct device *);
426
427 void wm_gmii_mediainit(struct wm_softc *);
428 int wm_gmii_mediachange(struct ifnet *);
429 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
430
431 int wm_match(struct device *, struct cfdata *, void *);
432 void wm_attach(struct device *, struct device *, void *);
433
434 int wm_copy_small = 0;
435
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};
439
440 /*
441 * Devices supported by this driver.
442 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t wmp_product;	/* PCI product ID */
	const char *wmp_name;		/* device description string */
	int wmp_type;			/* chip type (WM_T_*) */
	int wmp_flags;			/* media flavor, see below */
#define WMP_F_1000X 0x01
#define WMP_F_1000T 0x02
} wm_products[] = {
	/* Table is terminated by the entry with a NULL wmp_name;
	   wm_lookup() relies on that sentinel. */
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540, WMP_F_1000T },

	{ 0, 0,
	  NULL,
	  0, 0 },
};
508
#ifdef WM_EVENT_COUNTERS
/*
 * Names for the per-segment-count Tx event counters (sc_ev_txseg[]).
 * The #error guard keeps this table in sync with WM_NTXSEGS.
 */
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
532
533 static const struct wm_product *
534 wm_lookup(const struct pci_attach_args *pa)
535 {
536 const struct wm_product *wmp;
537
538 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
539 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
540 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
541 return (wmp);
542 }
543 return (NULL);
544 }
545
546 int
547 wm_match(struct device *parent, struct cfdata *cf, void *aux)
548 {
549 struct pci_attach_args *pa = aux;
550
551 if (wm_lookup(pa) != NULL)
552 return (1);
553
554 return (0);
555 }
556
/*
 * wm_attach:
 *
 *	Attach routine: identify the chip, map its registers, establish
 *	the interrupt, allocate DMA-able control data and Tx/Rx DMA
 *	maps, read the MAC address and configuration from the EEPROM,
 *	initialize the media layer (TBI or GMII), and attach the
 *	network interface.  On a mid-attach failure, resources are
 *	released via the fall-through fail_* labels at the bottom.
 */
void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	/*
	 * Distinguish i82542 2.0 from 2.1+ by PCI revision; rev. < 2
	 * parts are not supported at all.
	 */
	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	/* EEPROM words are little-endian; unpack into bytes. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	/*
	 * NOTE(review): disabled code; the "& 0xd" masks below look
	 * like typos for "& 0xf" -- verify before ever enabling.
	 */
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	/* NOTE(review): redundant; ifp was initialized to the same
	   value at its declaration above. */
	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
967
968 /*
969 * wm_shutdown:
970 *
971 * Make sure the interface is stopped at reboot time.
972 */
973 void
974 wm_shutdown(void *arg)
975 {
976 struct wm_softc *sc = arg;
977
978 wm_stop(&sc->sc_ethercom.ec_if, 1);
979 }
980
/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 *
 *	Computes the IPCS/TUCS checksum context words for the packet and,
 *	if they differ from the context cached in the softc, queues a
 *	context descriptor (consuming one Tx descriptor and incrementing
 *	txs->txs_ndesc).  On return, *cmdp and *fieldsp hold the command
 *	and field bits the caller ORs into the packet's data descriptors.
 *
 *	Returns 0 on success (including unsupported ethertypes, in which
 *	case *cmdp and *fieldsp are zeroed), or EINVAL if the Ethernet +
 *	IP headers are not contiguous in the first mbuf.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	/*
	 * NOTE(review): htons() is used where ntohs() would be the
	 * conventional spelling for reading a network-order field; the
	 * two are identical byte swaps, so the comparison is correct.
	 */
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX Requires the headers in the leading mbuf; no m_pullup(). */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;	/* actual IP header length, incl. options */

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}

	offset += iphl;		/* now the offset of the TCP/UDP header */

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		/* Remember the context we just loaded. */
		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		/* The context descriptor consumed one Tx descriptor. */
		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	/*
	 * NOTE(review): *cmdp is returned in host byte order while the
	 * caller ORs it with htole32() values before storing it in the
	 * descriptor; benign on little-endian hosts but looks suspect
	 * on big-endian -- confirm against if_wmreg.h.
	 */
	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
1103
/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 *
 *	Dequeues packets from ifp->if_snd, DMA-maps each one, fills in
 *	transmit descriptors, and hands them to the chip by advancing
 *	the TDT register.  Stops early (setting IFF_OACTIVE) when either
 *	the software job queue or the descriptor ring is exhausted.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	/* Nothing to do unless running and not already flow-blocked. */
	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/*
		 * Get a work queue entry.  When jobs run low, sweep up
		 * completed transmissions first in the hope of freeing
		 * some (we use delayed Tx interrupts).
		 */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
					sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * pack on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/* Delay the Tx completion interrupt for this packet. */
		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
1355
/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 *
 *	Runs when ifp->if_timer (armed in wm_start()) expires with
 *	transmissions still outstanding.  Sweeps completed jobs first;
 *	only if descriptors remain in use is a timeout declared and the
 *	interface reset.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	/* Still-busy descriptors after the sweep mean a real stall. */
	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}
1385
1386 /*
1387 * wm_ioctl: [ifnet interface function]
1388 *
1389 * Handle control requests from the operator.
1390 */
1391 int
1392 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1393 {
1394 struct wm_softc *sc = ifp->if_softc;
1395 struct ifreq *ifr = (struct ifreq *) data;
1396 int s, error;
1397
1398 s = splnet();
1399
1400 switch (cmd) {
1401 case SIOCSIFMEDIA:
1402 case SIOCGIFMEDIA:
1403 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1404 break;
1405
1406 default:
1407 error = ether_ioctl(ifp, cmd, data);
1408 if (error == ENETRESET) {
1409 /*
1410 * Multicast list has changed; set the hardware filter
1411 * accordingly.
1412 */
1413 wm_set_filter(sc);
1414 error = 0;
1415 }
1416 break;
1417 }
1418
1419 /* Try to get more packets going. */
1420 wm_start(ifp);
1421
1422 splx(s);
1423 return (error);
1424 }
1425
/*
 * wm_intr:
 *
 *	Interrupt service routine.
 *
 *	Loops re-reading ICR until none of the causes we enabled remain
 *	asserted, servicing Rx, Tx, and link events on each pass.
 *	NOTE(review): termination relies on the ICR read having
 *	read-to-clear semantics -- confirm against the chip manual.
 *	A receive overrun forces a full reinitialization.  Returns
 *	non-zero if the interrupt was ours.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		/* Not one of our interrupt causes; we're done. */
		if ((icr & sc->sc_icr) == 0)
			break;

#if NRND > 0
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		/* Rx processing runs unconditionally on every pass. */
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		/* ...as does the Tx completion sweep. */
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			/* Overrun: reinitialize the chip below. */
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}
1493
/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 *
 *	Walks the in-flight transmit jobs in order, reclaiming every job
 *	whose last descriptor has the Descriptor Done bit set: updates
 *	error/packet counters, unloads the DMA map, and frees the mbuf.
 *	Clears IFF_OACTIVE so wm_start() may queue again, and cancels
 *	the watchdog once the ring is fully drained.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * NOTE(review): wtxu_bits is a 32-bit field; the
		 * le32toh() result is truncated to 8 bits here.  The
		 * WTX_ST_* bits presumably all live in the low byte --
		 * confirm against if_wmreg.h.
		 */
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			/* Not done yet; hand it back to the chip. */
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				/* 16 attempts were made before giving up. */
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		/* Reclaim the descriptors and release the mbuf. */
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}
1580
/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 *
 *	Walks the receive ring from sc_rxptr, chaining multi-descriptor
 *	frames into one mbuf chain via WM_RXCHAIN_LINK().  For each
 *	completed frame: replaces the ring buffer, checks hardware error
 *	bits, records checksum-offload results, and hands the packet to
 *	the network stack via ifp->if_input.  On buffer-replacement
 *	failure the partial frame is dropped and the rest of the frame
 *	is discarded via sc_rxdiscard.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/*
		 * NOTE(review): wrx_status and wrx_errors are read with
		 * no byte-swap; presumably single-byte descriptor fields
		 * -- confirm against if_wmreg.h.
		 */
		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		/* Discard mode: throw away the remainder of a bad frame. */
		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		/* Append this buffer to the frame under assembly. */
		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;	/* total frame length */

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}
1785
/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 *
 *	MII (copper) devices simply get an mii_tick() on link status
 *	change.  TBI (fiber) devices update sc_tbi_linkup from the
 *	STATUS register, adjust the Tx collision distance for the
 *	resolved duplex, and set the link LED.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; program the collision distance
			 * to match the resolved duplex. */
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1852
1853 /*
1854 * wm_tick:
1855 *
1856 * One second timer, used to check link status, sweep up
1857 * completed transmit jobs, etc.
1858 */
1859 void
1860 wm_tick(void *arg)
1861 {
1862 struct wm_softc *sc = arg;
1863 int s;
1864
1865 s = splnet();
1866
1867 if (sc->sc_flags & WM_F_HAS_MII)
1868 mii_tick(&sc->sc_mii);
1869 else
1870 wm_tbi_check_link(sc);
1871
1872 splx(s);
1873
1874 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1875 }
1876
/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 *
 *	Asserts CTRL_RST, waits 10ms, then polls up to 1000 times at
 *	20us intervals for the bit to self-clear; warns on the console
 *	if it never does.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);		/* give the reset time to take hold */

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	/* One final read before declaring failure. */
	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
1900
/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops and resets the chip, rebuilds the Tx/Rx descriptor rings
 *	and software state, programs the VLAN table, flow control,
 *	checksum offload, interrupt mask, media, and receive filter,
 *	then marks the interface running and starts the one-second
 *	tick.  Returns 0 on success or an errno from Rx buffer
 *	allocation (in which case the interface is left down).
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	/* Invalidate the cached checksum offload context. */
	sc->sc_txctx_ipcs = 0xffffffff;
	sc->sc_txctx_tucs = 0xffffffff;

	/* Pre-82543 chips use the old register layout. */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 128);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_82543) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);

		/* The second Rx queue is unused; zero it out. */
		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_82543) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	/* TCP/UDP offload requires IP offload to be enabled too. */
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
	else {
		reg &= ~RXCSUM_TUOFL;
		if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
			reg &= ~RXCSUM_IPOFL;
	}
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);	/* mask everything first */
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;	/* TBI: watch for /C/ too */
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);

	/* Set up the inter-packet gap. */
	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);

#if 0 /* XXXJRT */
	/* Set the VLAN ethernetype. */
	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
#endif

	/*
	 * Set up the transmit control register; we start out with
	 * a collision distance suitable for FDX, but update it when
	 * we resolve the media type.
	 */
	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
	    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);

	/* Set the media. */
	(void) (*sc->sc_mii.mii_media.ifm_change)(ifp);

	/*
	 * Set up the receive control register; we actually program
	 * the register when we set the receive filter.  Use multicast
	 * address offset type 0.
	 *
	 * Only the i82544 has the ability to strip the incoming
	 * CRC, so we don't enable that feature.
	 */
	sc->sc_mchash_type = 0;
	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
	    RCTL_DPF | RCTL_MO(sc->sc_mchash_type);

	/* Set the receive filter. */
	wm_set_filter(sc);

	/* Start the one second link check clock. */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);

	/* ...all done!  (The success path falls through with error == 0.) */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", sc->sc_dev.dv_xname);
	return (error);
}
2122
2123 /*
2124 * wm_rxdrain:
2125 *
2126 * Drain the receive queue.
2127 */
2128 void
2129 wm_rxdrain(struct wm_softc *sc)
2130 {
2131 struct wm_rxsoft *rxs;
2132 int i;
2133
2134 for (i = 0; i < WM_NRXDESC; i++) {
2135 rxs = &sc->sc_rxsoft[i];
2136 if (rxs->rxs_mbuf != NULL) {
2137 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2138 m_freem(rxs->rxs_mbuf);
2139 rxs->rxs_mbuf = NULL;
2140 }
2141 }
2142 }
2143
2144 /*
2145 * wm_stop: [ifnet interface function]
2146 *
2147 * Stop transmission on the interface.
2148 */
2149 void
2150 wm_stop(struct ifnet *ifp, int disable)
2151 {
2152 struct wm_softc *sc = ifp->if_softc;
2153 struct wm_txsoft *txs;
2154 int i;
2155
2156 /* Stop the one second clock. */
2157 callout_stop(&sc->sc_tick_ch);
2158
2159 if (sc->sc_flags & WM_F_HAS_MII) {
2160 /* Down the MII. */
2161 mii_down(&sc->sc_mii);
2162 }
2163
2164 /* Stop the transmit and receive processes. */
2165 CSR_WRITE(sc, WMREG_TCTL, 0);
2166 CSR_WRITE(sc, WMREG_RCTL, 0);
2167
2168 /* Release any queued transmit buffers. */
2169 for (i = 0; i < WM_TXQUEUELEN; i++) {
2170 txs = &sc->sc_txsoft[i];
2171 if (txs->txs_mbuf != NULL) {
2172 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2173 m_freem(txs->txs_mbuf);
2174 txs->txs_mbuf = NULL;
2175 }
2176 }
2177
2178 if (disable)
2179 wm_rxdrain(sc);
2180
2181 /* Mark the interface as down and cancel the watchdog timer. */
2182 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2183 ifp->if_timer = 0;
2184 }
2185
2186 /*
2187 * wm_read_eeprom:
2188 *
2189 * Read data from the serial EEPROM.
2190 */
2191 void
2192 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2193 {
2194 uint32_t reg;
2195 int i, x, addrbits = 6;
2196
2197 for (i = 0; i < wordcnt; i++) {
2198 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2199 reg = CSR_READ(sc, WMREG_EECD);
2200
2201 /* Get number of address bits. */
2202 if (reg & EECD_EE_SIZE)
2203 addrbits = 8;
2204
2205 /* Request EEPROM access. */
2206 reg |= EECD_EE_REQ;
2207 CSR_WRITE(sc, WMREG_EECD, reg);
2208
2209 /* ..and wait for it to be granted. */
2210 for (x = 0; x < 100; x++) {
2211 reg = CSR_READ(sc, WMREG_EECD);
2212 if (reg & EECD_EE_GNT)
2213 break;
2214 delay(5);
2215 }
2216 if ((reg & EECD_EE_GNT) == 0) {
2217 printf("%s: could not acquire EEPROM GNT\n",
2218 sc->sc_dev.dv_xname);
2219 *data = 0xffff;
2220 reg &= ~EECD_EE_REQ;
2221 CSR_WRITE(sc, WMREG_EECD, reg);
2222 continue;
2223 }
2224 } else
2225 reg = 0;
2226
2227 /* Clear SK and DI. */
2228 reg &= ~(EECD_SK | EECD_DI);
2229 CSR_WRITE(sc, WMREG_EECD, reg);
2230
2231 /* Set CHIP SELECT. */
2232 reg |= EECD_CS;
2233 CSR_WRITE(sc, WMREG_EECD, reg);
2234 delay(2);
2235
2236 /* Shift in the READ command. */
2237 for (x = 3; x > 0; x--) {
2238 if (UWIRE_OPC_READ & (1 << (x - 1)))
2239 reg |= EECD_DI;
2240 else
2241 reg &= ~EECD_DI;
2242 CSR_WRITE(sc, WMREG_EECD, reg);
2243 delay(2);
2244 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2245 delay(2);
2246 CSR_WRITE(sc, WMREG_EECD, reg);
2247 delay(2);
2248 }
2249
2250 /* Shift in address. */
2251 for (x = addrbits; x > 0; x--) {
2252 if ((word + i) & (1 << (x - 1)))
2253 reg |= EECD_DI;
2254 else
2255 reg &= ~EECD_DI;
2256 CSR_WRITE(sc, WMREG_EECD, reg);
2257 delay(2);
2258 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2259 delay(2);
2260 CSR_WRITE(sc, WMREG_EECD, reg);
2261 delay(2);
2262 }
2263
2264 /* Shift out the data. */
2265 reg &= ~EECD_DI;
2266 data[i] = 0;
2267 for (x = 16; x > 0; x--) {
2268 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2269 delay(2);
2270 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2271 data[i] |= (1 << (x - 1));
2272 CSR_WRITE(sc, WMREG_EECD, reg);
2273 delay(2);
2274 }
2275
2276 /* Clear CHIP SELECT. */
2277 reg &= ~EECD_CS;
2278 CSR_WRITE(sc, WMREG_EECD, reg);
2279 delay(2);
2280
2281 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2282 /* Release the EEPROM. */
2283 reg &= ~EECD_EE_REQ;
2284 CSR_WRITE(sc, WMREG_EECD, reg);
2285 }
2286 }
2287 }
2288
/*
 * wm_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *	Returns 0 on success, ENOBUFS if no mbuf/cluster is available.
 */
int
wm_add_rxbuf(struct wm_softc *sc, int idx)
{
	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	/* Allocate an mbuf header and attach a cluster to it. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	/* Unload any old DMA mapping before reusing this slot. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	/* Map the whole cluster for device writes (receive DMA). */
	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error) {
		/*
		 * NOTE(review): load failure panics instead of
		 * recovering; the author marked this XXX.  Confirm a
		 * graceful error return is safe for callers before
		 * changing it.
		 */
		printf("%s: unable to load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, error);
		panic("wm_add_rxbuf"); /* XXX XXX XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* Hand the descriptor back to the hardware. */
	WM_INIT_RXDESC(sc, idx);

	return (0);
}
2332
2333 /*
2334 * wm_set_ral:
2335 *
2336 * Set an entery in the receive address list.
2337 */
2338 static void
2339 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2340 {
2341 uint32_t ral_lo, ral_hi;
2342
2343 if (enaddr != NULL) {
2344 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2345 (enaddr[3] << 24);
2346 ral_hi = enaddr[4] | (enaddr[5] << 8);
2347 ral_hi |= RAL_AV;
2348 } else {
2349 ral_lo = 0;
2350 ral_hi = 0;
2351 }
2352
2353 if (sc->sc_type >= WM_T_82544) {
2354 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2355 ral_lo);
2356 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2357 ral_hi);
2358 } else {
2359 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2360 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2361 }
2362 }
2363
2364 /*
2365 * wm_mchash:
2366 *
2367 * Compute the hash of the multicast address for the 4096-bit
2368 * multicast filter.
2369 */
2370 static uint32_t
2371 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2372 {
2373 static const int lo_shift[4] = { 4, 3, 2, 0 };
2374 static const int hi_shift[4] = { 4, 5, 6, 8 };
2375 uint32_t hash;
2376
2377 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2378 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2379
2380 return (hash & 0xfff);
2381 }
2382
/*
 * wm_set_filter:
 *
 *	Set up the receive filter: broadcast/promiscuous bits in RCTL,
 *	the receive address list, and the multicast hash table.
 */
void
wm_set_filter(struct wm_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_addr_t mta_reg;
	uint32_t hash, reg, bit;
	int i;

	/* The multicast table also moved on 82544 (Cordova) and later. */
	if (sc->sc_type >= WM_T_82544)
		mta_reg = WMREG_CORDOVA_MTA;
	else
		mta_reg = WMREG_MTA;

	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);

	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rctl |= RCTL_BAM;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rctl |= RCTL_UPE;
		goto allmulti;
	}

	/*
	 * Set the station address in the first RAL slot, and
	 * clear the remaining slots.
	 */
	wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
	for (i = 1; i < WM_RAL_TABSIZE; i++)
		wm_set_ral(sc, NULL, i);

	/* Clear out the multicast table. */
	for (i = 0; i < WM_MC_TABSIZE; i++)
		CSR_WRITE(sc, mta_reg + (i << 2), 0);

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			goto allmulti;
		}

		hash = wm_mchash(sc, enm->enm_addrlo);

		/* 12-bit hash: high 7 bits pick the word, low 5 the bit. */
		reg = (hash >> 5) & 0x7f;
		bit = hash & 0x1f;

		hash = CSR_READ(sc, mta_reg + (reg << 2));
		hash |= 1U << bit;

		/* XXX Hardware bug?? */
		/* 82544: rewrite the preceding word after an odd-word write. */
		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
		} else
			CSR_WRITE(sc, mta_reg + (reg << 2), hash);

		ETHER_NEXT_MULTI(step, enm);
	}

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rctl |= RCTL_MPE;

 setit:
	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
}
2468
/*
 * wm_tbi_mediainit:
 *
 *	Initialize media for use on 1000BASE-X devices.
 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Older chips use a different transmit IPG default. */
	if (sc->sc_type < WM_T_82543)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Add a media entry and print its name on the attach line. */
#define	ADD(s, m, d)							\
do {									\
	printf("%s%s", sep, s);						\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2515
2516 /*
2517 * wm_tbi_mediastatus: [ifmedia interface function]
2518 *
2519 * Get the current interface media status on a 1000BASE-X device.
2520 */
2521 void
2522 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2523 {
2524 struct wm_softc *sc = ifp->if_softc;
2525
2526 ifmr->ifm_status = IFM_AVALID;
2527 ifmr->ifm_active = IFM_ETHER;
2528
2529 if (sc->sc_tbi_linkup == 0) {
2530 ifmr->ifm_active |= IFM_NONE;
2531 return;
2532 }
2533
2534 ifmr->ifm_status |= IFM_ACTIVE;
2535 ifmr->ifm_active |= IFM_1000_SX;
2536 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2537 ifmr->ifm_active |= IFM_FDX;
2538 }
2539
/*
 * wm_tbi_mediachange:	[ifmedia interface function]
 *
 *	Set hardware to newly-selected media on a 1000BASE-X device.
 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit config word from the selected media's
	 * ifm_data, add pause bits per the current flow-control
	 * settings, and enable autonegotiation.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal (see wm_tbi_mediainit). */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; set collision distance per duplex. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No signal; mark the link down. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	wm_tbi_set_linkled(sc);

	return (0);
}
2606
2607 /*
2608 * wm_tbi_set_linkled:
2609 *
2610 * Update the link LED on 1000BASE-X devices.
2611 */
2612 void
2613 wm_tbi_set_linkled(struct wm_softc *sc)
2614 {
2615
2616 if (sc->sc_tbi_linkup)
2617 sc->sc_ctrl |= CTRL_SWDPIN(0);
2618 else
2619 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2620
2621 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2622 }
2623
/*
 * wm_tbi_check_link:
 *
 *	Check the link on 1000BASE-X devices.
 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/*
	 * sc_tbi_anstate counts down ticks to wait after a media
	 * change: 0 means nothing to do; >1 means keep waiting.
	 */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/*
	 * NOTE(review): rxcw and ctrl are read but not otherwise used
	 * below; the reads are kept in case reading these registers
	 * has hardware side effects -- confirm before removing.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		/* Link is down. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		/* Link is up; set collision distance per duplex. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	wm_tbi_set_linkled(sc);
}
2671
/*
 * wm_gmii_reset:
 *
 *	Reset the PHY.
 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_82544) {
		/* 82544 and later have a PHY reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/* The PHY reset pin is active-low. */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);

		/* Pulse the reset pin: deassert, assert, deassert. */
		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2708
/*
 * wm_gmii_mediainit:
 *
 *	Initialize media for use on 1000BASE-T devices.
 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/* 82544 and later use the MDIC register; 82543 bit-bangs MDIO. */
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	/* If no PHY was found, fall back to a "none" media entry. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2756
2757 /*
2758 * wm_gmii_mediastatus: [ifmedia interface function]
2759 *
2760 * Get the current interface media status on a 1000BASE-T device.
2761 */
2762 void
2763 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2764 {
2765 struct wm_softc *sc = ifp->if_softc;
2766
2767 mii_pollstat(&sc->sc_mii);
2768 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2769 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2770 }
2771
2772 /*
2773 * wm_gmii_mediachange: [ifmedia interface function]
2774 *
2775 * Set hardware to newly-selected media on a 1000BASE-T device.
2776 */
2777 int
2778 wm_gmii_mediachange(struct ifnet *ifp)
2779 {
2780 struct wm_softc *sc = ifp->if_softc;
2781
2782 if (ifp->if_flags & IFF_UP)
2783 mii_mediachg(&sc->sc_mii);
2784 return (0);
2785 }
2786
/* Software MDIO pins on the i82543 (bit-banged through CTRL). */
#define MDI_IO		CTRL_SWDPIN(2)
#define MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define MDI_CLK		CTRL_SWDPIN(3)

/*
 * i82543_mii_sendbits:
 *
 *	Clock the most-significant `nbits' of `data' out to the PHY
 *	over the bit-banged MDIO interface.
 */
static void
i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Make the MDIO data and clock pins outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	/* For each bit, MSB first: set the data line, pulse the clock. */
	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
2813
/*
 * i82543_mii_recvbits:
 *
 *	Clock 16 bits of data in from the PHY over the bit-banged
 *	MDIO interface.
 */
static uint32_t
i82543_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Make the MDIO data pin an input; keep the clock as output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One priming clock pulse before the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Sample one bit per clock pulse, accumulating MSB first. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing clock pulse to finish the frame. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
2851
2852 /*
2853 * wm_gmii_i82543_readreg: [mii interface function]
2854 *
2855 * Read a PHY register on the GMII (i82543 version).
2856 */
2857 int
2858 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2859 {
2860 struct wm_softc *sc = (void *) self;
2861 int rv;
2862
2863 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2864 i82543_mii_sendbits(sc, reg | (phy << 5) |
2865 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2866 rv = i82543_mii_recvbits(sc) & 0xffff;
2867
2868 DPRINTF(WM_DEBUG_GMII,
2869 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2870 sc->sc_dev.dv_xname, phy, reg, rv));
2871
2872 return (rv);
2873 }
2874
2875 /*
2876 * wm_gmii_i82543_writereg: [mii interface function]
2877 *
2878 * Write a PHY register on the GMII (i82543 version).
2879 */
2880 void
2881 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2882 {
2883 struct wm_softc *sc = (void *) self;
2884
2885 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2886 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2887 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2888 (MII_COMMAND_START << 30), 32);
2889 }
2890
2891 /*
2892 * wm_gmii_i82544_readreg: [mii interface function]
2893 *
2894 * Read a PHY register on the GMII.
2895 */
2896 int
2897 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2898 {
2899 struct wm_softc *sc = (void *) self;
2900 uint32_t mdic;
2901 int i, rv;
2902
2903 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2904 MDIC_REGADD(reg));
2905
2906 for (i = 0; i < 100; i++) {
2907 mdic = CSR_READ(sc, WMREG_MDIC);
2908 if (mdic & MDIC_READY)
2909 break;
2910 delay(10);
2911 }
2912
2913 if ((mdic & MDIC_READY) == 0) {
2914 printf("%s: MDIC read timed out: phy %d reg %d\n",
2915 sc->sc_dev.dv_xname, phy, reg);
2916 rv = 0;
2917 } else if (mdic & MDIC_E) {
2918 #if 0 /* This is normal if no PHY is present. */
2919 printf("%s: MDIC read error: phy %d reg %d\n",
2920 sc->sc_dev.dv_xname, phy, reg);
2921 #endif
2922 rv = 0;
2923 } else {
2924 rv = MDIC_DATA(mdic);
2925 if (rv == 0xffff)
2926 rv = 0;
2927 }
2928
2929 return (rv);
2930 }
2931
2932 /*
2933 * wm_gmii_i82544_writereg: [mii interface function]
2934 *
2935 * Write a PHY register on the GMII.
2936 */
2937 void
2938 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2939 {
2940 struct wm_softc *sc = (void *) self;
2941 uint32_t mdic;
2942 int i;
2943
2944 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2945 MDIC_REGADD(reg) | MDIC_DATA(val));
2946
2947 for (i = 0; i < 100; i++) {
2948 mdic = CSR_READ(sc, WMREG_MDIC);
2949 if (mdic & MDIC_READY)
2950 break;
2951 delay(10);
2952 }
2953
2954 if ((mdic & MDIC_READY) == 0)
2955 printf("%s: MDIC write timed out: phy %d reg %d\n",
2956 sc->sc_dev.dv_xname, phy, reg);
2957 else if (mdic & MDIC_E)
2958 printf("%s: MDIC write error: phy %d reg %d\n",
2959 sc->sc_dev.dv_xname, phy, reg);
2960 }
2961
2962 /*
2963 * wm_gmii_statchg: [mii interface function]
2964 *
2965 * Callback from MII layer when media changes.
2966 */
2967 void
2968 wm_gmii_statchg(struct device *self)
2969 {
2970 struct wm_softc *sc = (void *) self;
2971
2972 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2973
2974 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2975 DPRINTF(WM_DEBUG_LINK,
2976 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2977 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2978 } else {
2979 DPRINTF(WM_DEBUG_LINK,
2980 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2981 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2982 }
2983
2984 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2985 }
2986