/*	$NetBSD: if_wm.c,v 1.31 2003/01/21 05:43:26 itojun Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>	/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>		/* XXX for struct ip */
#include <netinet/in_systm.h>	/* XXX for struct ip */
#include <netinet/ip.h>		/* XXX for struct ip */
#include <netinet/tcp.h>	/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
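
/*
 * Illustrative note: because WM_NTXDESC and WM_TXQUEUELEN are powers
 * of two, the masks above implement cheap modular ring arithmetic,
 * e.g. WM_NEXTTX(17) == 18 and WM_NEXTTX(255) == 0.
 */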

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
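
/*
 * For example, WM_CDTXOFF(5) is 5 * sizeof(wiseman_txdesc_t) from the
 * start of the clump, and WM_CDRXOFF(0) begins immediately after the
 * WM_NTXDESC transmit descriptors, so a single DMA map load of the
 * clump covers every descriptor in both rings.
 */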

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
};

#define	WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
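
/*
 * Usage sketch: the tail pointer lets the Rx path append the buffers
 * of a multi-buffer packet in constant time, e.g.:
 *
 *	WM_RXCHAIN_RESET(sc);		sc_rxhead == NULL, sc_rxlen == 0
 *	WM_RXCHAIN_LINK(sc, m1);	sc_rxhead == m1, sc_rxtail == m1
 *	WM_RXCHAIN_LINK(sc, m2);	m1->m_next == m2, sc_rxtail == m2
 */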

/* sc_type */
#define	WM_T_82542_2_0		0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1		1	/* i82542 2.1+ (old) */
#define	WM_T_82543		2	/* i82543 */
#define	WM_T_82544		3	/* i82544 */
#define	WM_T_82540		4	/* i82540 */
#define	WM_T_82545		5	/* i82545 */
#define	WM_T_82546		6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */
#define	WM_F_EEPROM_HANDSHAKE	0x02	/* requires EEPROM handshake */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC - __x), (ops)); \
		__n -= (WM_NTXDESC - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
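
/*
 * Worked example: syncing 10 descriptors starting at index 250 wraps
 * the 256-entry ring, so the macro above issues two bus_dmamap_sync()
 * calls, one covering descriptors 250-255 and one covering
 * descriptors 0-3.
 */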

#define	WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't accept packets longer than the standard \
	 * Ethernet MTU, without incurring a big penalty to copy every \
	 * incoming packet to a new, suitably aligned buffer. \
	 * \
	 * We'll need to make some changes to the layer 3/4 parts of \
	 * the stack (to copy the headers to a new buffer if not \
	 * aligned) in order to support large MTU on this chip.  Lame. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + 2; \
 \
	__rxd->wrx_addr.wa_low = \
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
	__rxd->wrx_addr.wa_high = 0; \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
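
/*
 * Worked example: with the 2-byte pad above, the 14-byte Ethernet
 * header ends at offset 16 into the buffer, leaving the IP header
 * that follows aligned to a 4-byte boundary, as the layer 3 code
 * expects.
 */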

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

CFATTACH_DECL(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, NULL, NULL);

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Some chips require a handshake to access the EEPROM.
	 */
	if (sc->sc_type >= WM_T_82540)
		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + PCI_PMCSR) &
		    PCI_PMCSR_STATE_MASK;
		if (preg == PCI_PMCSR_STATE_D3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != PCI_PMCSR_STATE_D0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + PCI_PMCSR,
			    PCI_PMCSR_STATE_D0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    0)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
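
	/*
	 * For example, the EEPROM stores the address as three
	 * little-endian 16-bit words, so an EEPROM containing
	 * { 0xa000, 0x12c9, 0x5634 } unpacks to 00:a0:c9:12:34:56.
	 */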

	/*
	 * Toggle the LSB of the MAC address on the second port
	 * of the i82546.
	 */
	if (sc->sc_type == WM_T_82546) {
		if ((CSR_READ(sc, WMREG_STATUS) >> STATUS_FUNCID_SHIFT) & 1)
			enaddr[5] ^= 1;
	}
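
	/*
	 * E.g. if port 0 reads 00:a0:c9:12:34:56 from the shared EEPROM,
	 * port 1 uses 00:a0:c9:12:34:57.
	 */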

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, sc->sc_dev.dv_xname,
	    RND_TYPE_NET, 0);
#endif

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 * Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 * Set up TCP/IP checksumming parameters for the
 * specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}
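
	/*
	 * Worked example: for a plain Ethernet/IPv4 frame, offset is
	 * ETHER_HDR_LEN (14) and iphl is 20, so the context above sets
	 * IPCSS = 14 (start of the IP header), IPCSO = 24 (ip_sum lies
	 * 10 bytes into struct ip), and IPCSE = 33 (last byte of the
	 * 20-byte header).
	 */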

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start: [ifnet interface function]
 *
 * Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0;
#if 0 /* XXXJRT */
	struct m_tag *mtag;
#endif
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_high = 0;
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (mtag = m_tag_find(m0, PACKET_TAG_VLAN, NULL)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*(u_int *)(mtag + 1) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog: [ifnet interface function]
 *
 * Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl: [ifnet interface function]
 *
 * Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 * Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 * Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0) {
			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the i82544.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 * Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct m_tag *vtag;

			vtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*(u_int *)(vtag + 1) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 * Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}
1854
1855 /*
1856 * wm_tick:
1857 *
1858 * One second timer, used to check link status, sweep up
1859 * completed transmit jobs, etc.
1860 */
1861 void
1862 wm_tick(void *arg)
1863 {
1864 struct wm_softc *sc = arg;
1865 int s;
1866
1867 s = splnet();
1868
1869 if (sc->sc_flags & WM_F_HAS_MII)
1870 mii_tick(&sc->sc_mii);
1871 else
1872 wm_tbi_check_link(sc);
1873
1874 splx(s);
1875
1876 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1877 }
1878
1879 /*
1880 * wm_reset:
1881 *
1882 * Reset the i82542 chip.
1883 */
1884 void
1885 wm_reset(struct wm_softc *sc)
1886 {
1887 int i;
1888
1889 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1890 delay(10000);
1891
1892 for (i = 0; i < 1000; i++) {
1893 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1894 return;
1895 delay(20);
1896 }
1897
1898 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1899 printf("%s: WARNING: reset failed to complete\n",
1900 sc->sc_dev.dv_xname);
1901 }
1902
1903 /*
1904 * wm_init: [ifnet interface function]
1905 *
1906 * Initialize the interface. Must be called at splnet().
1907 */
1908 int
1909 wm_init(struct ifnet *ifp)
1910 {
1911 struct wm_softc *sc = ifp->if_softc;
1912 struct wm_rxsoft *rxs;
1913 int i, error = 0;
1914 uint32_t reg;
1915
1916 /* Cancel any pending I/O. */
1917 wm_stop(ifp, 0);
1918
1919 /* Reset the chip to a known state. */
1920 wm_reset(sc);
1921
1922 /* Initialize the transmit descriptor ring. */
1923 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1924 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1925 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1926 sc->sc_txfree = WM_NTXDESC;
1927 sc->sc_txnext = 0;
1928
1929 sc->sc_txctx_ipcs = 0xffffffff;
1930 sc->sc_txctx_tucs = 0xffffffff;
1931
1932 if (sc->sc_type < WM_T_82543) {
1933 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1934 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1935 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1936 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1937 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1938 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1939 } else {
1940 CSR_WRITE(sc, WMREG_TBDAH, 0);
1941 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1942 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1943 CSR_WRITE(sc, WMREG_TDH, 0);
1944 CSR_WRITE(sc, WMREG_TDT, 0);
1945 CSR_WRITE(sc, WMREG_TIDV, 128);
1946
1947 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1948 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1949 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1950 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1951 }
1952 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1953 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1954
1955 /* Initialize the transmit job descriptors. */
1956 for (i = 0; i < WM_TXQUEUELEN; i++)
1957 sc->sc_txsoft[i].txs_mbuf = NULL;
1958 sc->sc_txsfree = WM_TXQUEUELEN;
1959 sc->sc_txsnext = 0;
1960 sc->sc_txsdirty = 0;
1961
1962 /*
1963 * Initialize the receive descriptor and receive job
1964 * descriptor rings.
1965 */
1966 if (sc->sc_type < WM_T_82543) {
1967 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1968 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1969 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1970 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1971 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1972 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1973
1974 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1975 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1976 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1977 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1978 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1979 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1980 } else {
1981 CSR_WRITE(sc, WMREG_RDBAH, 0);
1982 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1983 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1984 CSR_WRITE(sc, WMREG_RDH, 0);
1985 CSR_WRITE(sc, WMREG_RDT, 0);
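		/*
		 * The receive delay timer counts in 1.024us units;
		 * RDTR_FPD flushes a partially-filled descriptor when
		 * the timer expires.
		 */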
1986 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1987 }
1988 for (i = 0; i < WM_NRXDESC; i++) {
1989 rxs = &sc->sc_rxsoft[i];
1990 if (rxs->rxs_mbuf == NULL) {
1991 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1992 printf("%s: unable to allocate or map rx "
1993 "buffer %d, error = %d\n",
1994 sc->sc_dev.dv_xname, i, error);
1995 /*
1996 * XXX Should attempt to run with fewer receive
1997 * XXX buffers instead of just failing.
1998 */
1999 wm_rxdrain(sc);
2000 goto out;
2001 }
2002 } else
2003 WM_INIT_RXDESC(sc, i);
2004 }
2005 sc->sc_rxptr = 0;
2006 sc->sc_rxdiscard = 0;
2007 WM_RXCHAIN_RESET(sc);
2008
2009 /*
2010 * Clear out the VLAN table -- we don't use it (yet).
2011 */
2012 CSR_WRITE(sc, WMREG_VET, 0);
2013 for (i = 0; i < WM_VLAN_TABSIZE; i++)
2014 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
2015
2016 /*
2017 * Set up flow-control parameters.
2018 *
2019 * XXX Values could probably stand some tuning.
2020 */
2021 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
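		/*
		 * FCAL/FCAH/FCT match 802.3x PAUSE frames: the
		 * reserved multicast address 01:80:c2:00:00:01 and
		 * the MAC control ethertype (0x8808).
		 */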
2022 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
2023 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
2024 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
2025
2026 if (sc->sc_type < WM_T_82543) {
2027 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
2028 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
2029 } else {
2030 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
2031 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
2032 }
2033 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
2034 }
2035
2036 #if 0 /* XXXJRT */
2037 /* Deal with VLAN enables. */
2038 if (sc->sc_ethercom.ec_nvlans != 0)
2039 sc->sc_ctrl |= CTRL_VME;
2040 else
2041 #endif /* XXXJRT */
2042 sc->sc_ctrl &= ~CTRL_VME;
2043
2044 /* Write the control registers. */
2045 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2046 #if 0
2047 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2048 #endif
2049
2050 /*
2051 * Set up checksum offload parameters.
2052 */
2053 reg = CSR_READ(sc, WMREG_RXCSUM);
2054 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
2055 reg |= RXCSUM_IPOFL;
2056 else
2057 reg &= ~RXCSUM_IPOFL;
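	/*
	 * The chip can't checksum TCP/UDP without also checksumming
	 * IP, so TUOFL drags IPOFL along with it; when TUOFL is off,
	 * IPOFL survives only if IPv4 checksums were requested above.
	 */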
2058 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2059 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2060 else {
2061 reg &= ~RXCSUM_TUOFL;
2062 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2063 reg &= ~RXCSUM_IPOFL;
2064 }
2065 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2066
2067 /*
2068 * Set up the interrupt registers.
2069 */
2070 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2071 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2072 ICR_RXO | ICR_RXT0;
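	/*
	 * On TBI (1000BASE-X) parts we also watch for incoming /C/
	 * ordered sets, which signal autonegotiation activity.
	 */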
2073 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2074 sc->sc_icr |= ICR_RXCFG;
2075 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2076
2077 /* Set up the inter-packet gap. */
2078 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2079
2080 #if 0 /* XXXJRT */
2081 /* Set the VLAN ethernetype. */
2082 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2083 #endif
2084
2085 /*
2086 * Set up the transmit control register; we start out with
2087  * a collision distance suitable for FDX, but update it when
2088 * we resolve the media type.
2089 */
2090 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2091 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2092 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2093
2094 /* Set the media. */
2095 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2096
2097 /*
2098 * Set up the receive control register; we actually program
2099 * the register when we set the receive filter. Use multicast
2100 * address offset type 0.
2101 *
2102 * Only the i82544 has the ability to strip the incoming
2103 * CRC, so we don't enable that feature.
2104 */
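	/*
	 * RCTL_2k selects 2KB receive buffers (one mbuf cluster),
	 * RCTL_DPF discards inbound PAUSE frames, and RCTL_RDMTS_1_2
	 * raises RXDMT0 when the free receive descriptors drop to
	 * half the ring.
	 */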
2105 sc->sc_mchash_type = 0;
2106 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2107 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2108
2109 /* Set the receive filter. */
2110 wm_set_filter(sc);
2111
2112 /* Start the one second link check clock. */
2113 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2114
2115 /* ...all done! */
2116 ifp->if_flags |= IFF_RUNNING;
2117 ifp->if_flags &= ~IFF_OACTIVE;
2118
2119 out:
2120 if (error)
2121 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2122 return (error);
2123 }
2124
2125 /*
2126 * wm_rxdrain:
2127 *
2128 * Drain the receive queue.
2129 */
2130 void
2131 wm_rxdrain(struct wm_softc *sc)
2132 {
2133 struct wm_rxsoft *rxs;
2134 int i;
2135
2136 for (i = 0; i < WM_NRXDESC; i++) {
2137 rxs = &sc->sc_rxsoft[i];
2138 if (rxs->rxs_mbuf != NULL) {
2139 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2140 m_freem(rxs->rxs_mbuf);
2141 rxs->rxs_mbuf = NULL;
2142 }
2143 }
2144 }
2145
2146 /*
2147 * wm_stop: [ifnet interface function]
2148 *
2149 * Stop transmission on the interface.
2150 */
2151 void
2152 wm_stop(struct ifnet *ifp, int disable)
2153 {
2154 struct wm_softc *sc = ifp->if_softc;
2155 struct wm_txsoft *txs;
2156 int i;
2157
2158 /* Stop the one second clock. */
2159 callout_stop(&sc->sc_tick_ch);
2160
2161 if (sc->sc_flags & WM_F_HAS_MII) {
2162 /* Down the MII. */
2163 mii_down(&sc->sc_mii);
2164 }
2165
2166 /* Stop the transmit and receive processes. */
2167 CSR_WRITE(sc, WMREG_TCTL, 0);
2168 CSR_WRITE(sc, WMREG_RCTL, 0);
2169
2170 /* Release any queued transmit buffers. */
2171 for (i = 0; i < WM_TXQUEUELEN; i++) {
2172 txs = &sc->sc_txsoft[i];
2173 if (txs->txs_mbuf != NULL) {
2174 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2175 m_freem(txs->txs_mbuf);
2176 txs->txs_mbuf = NULL;
2177 }
2178 }
2179
2180 if (disable)
2181 wm_rxdrain(sc);
2182
2183 /* Mark the interface as down and cancel the watchdog timer. */
2184 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2185 ifp->if_timer = 0;
2186 }
2187
2188 /*
2189 * wm_read_eeprom:
2190 *
2191 * Read data from the serial EEPROM.
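 *
 *	The EEPROM is a MicroWire device: we bit-bang the clock (SK),
 *	chip select (CS), and data lines (DI/DO) by hand through the
 *	EECD register.  Chips with WM_F_EEPROM_HANDSHAKE require a
 *	request/grant exchange before the host may drive the pins.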
2192 */
2193 void
2194 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2195 {
2196 uint32_t reg;
2197 int i, x, addrbits = 6;
2198
2199 for (i = 0; i < wordcnt; i++) {
2200 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2201 reg = CSR_READ(sc, WMREG_EECD);
2202
2203 /* Get number of address bits. */
2204 if (reg & EECD_EE_SIZE)
2205 addrbits = 8;
2206
2207 /* Request EEPROM access. */
2208 reg |= EECD_EE_REQ;
2209 CSR_WRITE(sc, WMREG_EECD, reg);
2210
2211 			/* ...and wait for it to be granted. */
2212 for (x = 0; x < 100; x++) {
2213 reg = CSR_READ(sc, WMREG_EECD);
2214 if (reg & EECD_EE_GNT)
2215 break;
2216 delay(5);
2217 }
2218 if ((reg & EECD_EE_GNT) == 0) {
2219 printf("%s: could not acquire EEPROM GNT\n",
2220 sc->sc_dev.dv_xname);
2221 *data = 0xffff;
2222 reg &= ~EECD_EE_REQ;
2223 CSR_WRITE(sc, WMREG_EECD, reg);
2224 continue;
2225 }
2226 } else
2227 reg = 0;
2228
2229 /* Clear SK and DI. */
2230 reg &= ~(EECD_SK | EECD_DI);
2231 CSR_WRITE(sc, WMREG_EECD, reg);
2232
2233 /* Set CHIP SELECT. */
2234 reg |= EECD_CS;
2235 CSR_WRITE(sc, WMREG_EECD, reg);
2236 delay(2);
2237
2238 /* Shift in the READ command. */
2239 for (x = 3; x > 0; x--) {
2240 if (UWIRE_OPC_READ & (1 << (x - 1)))
2241 reg |= EECD_DI;
2242 else
2243 reg &= ~EECD_DI;
2244 CSR_WRITE(sc, WMREG_EECD, reg);
2245 delay(2);
2246 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2247 delay(2);
2248 CSR_WRITE(sc, WMREG_EECD, reg);
2249 delay(2);
2250 }
2251
2252 /* Shift in address. */
2253 for (x = addrbits; x > 0; x--) {
2254 if ((word + i) & (1 << (x - 1)))
2255 reg |= EECD_DI;
2256 else
2257 reg &= ~EECD_DI;
2258 CSR_WRITE(sc, WMREG_EECD, reg);
2259 delay(2);
2260 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2261 delay(2);
2262 CSR_WRITE(sc, WMREG_EECD, reg);
2263 delay(2);
2264 }
2265
2266 /* Shift out the data. */
2267 reg &= ~EECD_DI;
2268 data[i] = 0;
2269 for (x = 16; x > 0; x--) {
2270 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2271 delay(2);
2272 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2273 data[i] |= (1 << (x - 1));
2274 CSR_WRITE(sc, WMREG_EECD, reg);
2275 delay(2);
2276 }
2277
2278 /* Clear CHIP SELECT. */
2279 reg &= ~EECD_CS;
2280 CSR_WRITE(sc, WMREG_EECD, reg);
2281 delay(2);
2282
2283 if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
2284 /* Release the EEPROM. */
2285 reg &= ~EECD_EE_REQ;
2286 CSR_WRITE(sc, WMREG_EECD, reg);
2287 }
2288 }
2289 }
2290
2291 /*
2292 * wm_add_rxbuf:
2293 *
2294  *	Add a receive buffer to the indicated descriptor.
2295 */
2296 int
2297 wm_add_rxbuf(struct wm_softc *sc, int idx)
2298 {
2299 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2300 struct mbuf *m;
2301 int error;
2302
2303 MGETHDR(m, M_DONTWAIT, MT_DATA);
2304 if (m == NULL)
2305 return (ENOBUFS);
2306
2307 MCLGET(m, M_DONTWAIT);
2308 if ((m->m_flags & M_EXT) == 0) {
2309 m_freem(m);
2310 return (ENOBUFS);
2311 }
2312
2313 if (rxs->rxs_mbuf != NULL)
2314 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2315
2316 rxs->rxs_mbuf = m;
2317
2318 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2319 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2320 BUS_DMA_READ|BUS_DMA_NOWAIT);
2321 if (error) {
2322 printf("%s: unable to load rx DMA map %d, error = %d\n",
2323 sc->sc_dev.dv_xname, idx, error);
2324 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2325 }
2326
2327 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2328 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2329
2330 WM_INIT_RXDESC(sc, idx);
2331
2332 return (0);
2333 }
2334
2335 /*
2336 * wm_set_ral:
2337 *
2338  *	Set an entry in the receive address list.
2339 */
2340 static void
2341 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2342 {
2343 uint32_t ral_lo, ral_hi;
2344
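	/*
	 * The receive address registers take the station address in
	 * little-endian byte order; RAL_AV marks the entry valid.
	 */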
2345 if (enaddr != NULL) {
2346 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2347 (enaddr[3] << 24);
2348 ral_hi = enaddr[4] | (enaddr[5] << 8);
2349 ral_hi |= RAL_AV;
2350 } else {
2351 ral_lo = 0;
2352 ral_hi = 0;
2353 }
2354
2355 if (sc->sc_type >= WM_T_82544) {
2356 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2357 ral_lo);
2358 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2359 ral_hi);
2360 } else {
2361 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2362 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2363 }
2364 }
2365
2366 /*
2367 * wm_mchash:
2368 *
2369 * Compute the hash of the multicast address for the 4096-bit
2370 * multicast filter.
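 *
 *	The 12-bit hash is built from the last two bytes of the
 *	address; exactly which bits are used depends on the multicast
 *	offset type programmed via RCTL_MO.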
2371 */
2372 static uint32_t
2373 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2374 {
2375 static const int lo_shift[4] = { 4, 3, 2, 0 };
2376 static const int hi_shift[4] = { 4, 5, 6, 8 };
2377 uint32_t hash;
2378
2379 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2380 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2381
2382 return (hash & 0xfff);
2383 }
2384
2385 /*
2386 * wm_set_filter:
2387 *
2388 * Set up the receive filter.
2389 */
2390 void
2391 wm_set_filter(struct wm_softc *sc)
2392 {
2393 struct ethercom *ec = &sc->sc_ethercom;
2394 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2395 struct ether_multi *enm;
2396 struct ether_multistep step;
2397 bus_addr_t mta_reg;
2398 uint32_t hash, reg, bit;
2399 int i;
2400
2401 if (sc->sc_type >= WM_T_82544)
2402 mta_reg = WMREG_CORDOVA_MTA;
2403 else
2404 mta_reg = WMREG_MTA;
2405
2406 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2407
2408 if (ifp->if_flags & IFF_BROADCAST)
2409 sc->sc_rctl |= RCTL_BAM;
2410 if (ifp->if_flags & IFF_PROMISC) {
2411 sc->sc_rctl |= RCTL_UPE;
2412 goto allmulti;
2413 }
2414
2415 /*
2416 * Set the station address in the first RAL slot, and
2417 * clear the remaining slots.
2418 */
2419 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2420 for (i = 1; i < WM_RAL_TABSIZE; i++)
2421 wm_set_ral(sc, NULL, i);
2422
2423 /* Clear out the multicast table. */
2424 for (i = 0; i < WM_MC_TABSIZE; i++)
2425 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2426
2427 ETHER_FIRST_MULTI(step, ec, enm);
2428 while (enm != NULL) {
2429 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2430 /*
2431 * We must listen to a range of multicast addresses.
2432 * For now, just accept all multicasts, rather than
2433 * trying to set only those filter bits needed to match
2434 * the range. (At this time, the only use of address
2435 * ranges is for IP multicast routing, for which the
2436 * range is big enough to require all bits set.)
2437 */
2438 goto allmulti;
2439 }
2440
2441 hash = wm_mchash(sc, enm->enm_addrlo);
2442
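		/*
		 * The 4096-bit table is an array of 128 32-bit
		 * registers: hash bits 11:5 select the register,
		 * bits 4:0 the bit within it.
		 */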
2443 reg = (hash >> 5) & 0x7f;
2444 bit = hash & 0x1f;
2445
2446 hash = CSR_READ(sc, mta_reg + (reg << 2));
2447 hash |= 1U << bit;
2448
2449 		/*
		 * XXX Hardware bug?  On the i82544, writing an odd MTA
		 * XXX register can apparently clobber the even register
		 * XXX below it, so save that register and rewrite it
		 * XXX after our write.
		 */
2450 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
2451 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2452 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2453 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2454 } else
2455 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2456
2457 ETHER_NEXT_MULTI(step, enm);
2458 }
2459
2460 ifp->if_flags &= ~IFF_ALLMULTI;
2461 goto setit;
2462
2463 allmulti:
2464 ifp->if_flags |= IFF_ALLMULTI;
2465 sc->sc_rctl |= RCTL_MPE;
2466
2467 setit:
2468 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2469 }
2470
2471 /*
2472 * wm_tbi_mediainit:
2473 *
2474 * Initialize media for use on 1000BASE-X devices.
2475 */
2476 void
2477 wm_tbi_mediainit(struct wm_softc *sc)
2478 {
2479 const char *sep = "";
2480
2481 if (sc->sc_type < WM_T_82543)
2482 sc->sc_tipg = TIPG_WM_DFLT;
2483 else
2484 sc->sc_tipg = TIPG_LG_DFLT;
2485
2486 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
2487 wm_tbi_mediastatus);
2488
2489 /*
2490 * SWD Pins:
2491 *
2492 * 0 = Link LED (output)
2493 * 1 = Loss Of Signal (input)
2494 */
2495 sc->sc_ctrl |= CTRL_SWDPIO(0);
2496 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2497
2498 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2499
2500 #define ADD(ss, mm, dd) \
2501 do { \
2502 printf("%s%s", sep, ss); \
2503 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL); \
2504 sep = ", "; \
2505 } while (/*CONSTCOND*/0)
2506
2507 printf("%s: ", sc->sc_dev.dv_xname);
2508 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2509 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2510 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2511 printf("\n");
2512
2513 #undef ADD
2514
2515 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2516 }
2517
2518 /*
2519 * wm_tbi_mediastatus: [ifmedia interface function]
2520 *
2521 * Get the current interface media status on a 1000BASE-X device.
2522 */
2523 void
2524 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2525 {
2526 struct wm_softc *sc = ifp->if_softc;
2527
2528 ifmr->ifm_status = IFM_AVALID;
2529 ifmr->ifm_active = IFM_ETHER;
2530
2531 if (sc->sc_tbi_linkup == 0) {
2532 ifmr->ifm_active |= IFM_NONE;
2533 return;
2534 }
2535
2536 ifmr->ifm_status |= IFM_ACTIVE;
2537 ifmr->ifm_active |= IFM_1000_SX;
2538 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2539 ifmr->ifm_active |= IFM_FDX;
2540 }
2541
2542 /*
2543 * wm_tbi_mediachange: [ifmedia interface function]
2544 *
2545 * Set hardware to newly-selected media on a 1000BASE-X device.
2546 */
2547 int
2548 wm_tbi_mediachange(struct ifnet *ifp)
2549 {
2550 struct wm_softc *sc = ifp->if_softc;
2551 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2552 uint32_t status;
2553 int i;
2554
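	/*
	 * The ifmedia entry's data field carries the 1000BASE-X ANAR
	 * bits to advertise; fold in our pause capabilities and turn
	 * on autonegotiation.
	 */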
2555 sc->sc_txcw = ife->ifm_data;
2556 if (sc->sc_ctrl & CTRL_RFCE)
2557 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2558 if (sc->sc_ctrl & CTRL_TFCE)
2559 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2560 sc->sc_txcw |= TXCW_ANE;
2561
2562 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2563 delay(10000);
2564
2565 sc->sc_tbi_anstate = 0;
2566
2567 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2568 /* Have signal; wait for the link to come up. */
2569 for (i = 0; i < 50; i++) {
2570 delay(10000);
2571 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2572 break;
2573 }
2574
2575 status = CSR_READ(sc, WMREG_STATUS);
2576 if (status & STATUS_LU) {
2577 /* Link is up. */
2578 DPRINTF(WM_DEBUG_LINK,
2579 ("%s: LINK: set media -> link up %s\n",
2580 sc->sc_dev.dv_xname,
2581 (status & STATUS_FD) ? "FDX" : "HDX"));
2582 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2583 if (status & STATUS_FD)
2584 sc->sc_tctl |=
2585 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2586 else
2587 sc->sc_tctl |=
2588 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2589 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2590 sc->sc_tbi_linkup = 1;
2591 } else {
2592 /* Link is down. */
2593 DPRINTF(WM_DEBUG_LINK,
2594 ("%s: LINK: set media -> link down\n",
2595 sc->sc_dev.dv_xname));
2596 sc->sc_tbi_linkup = 0;
2597 }
2598 } else {
2599 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2600 sc->sc_dev.dv_xname));
2601 sc->sc_tbi_linkup = 0;
2602 }
2603
2604 wm_tbi_set_linkled(sc);
2605
2606 return (0);
2607 }
2608
2609 /*
2610 * wm_tbi_set_linkled:
2611 *
2612 * Update the link LED on 1000BASE-X devices.
2613 */
2614 void
2615 wm_tbi_set_linkled(struct wm_softc *sc)
2616 {
2617
2618 if (sc->sc_tbi_linkup)
2619 sc->sc_ctrl |= CTRL_SWDPIN(0);
2620 else
2621 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2622
2623 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2624 }
2625
2626 /*
2627 * wm_tbi_check_link:
2628 *
2629 * Check the link on 1000BASE-X devices.
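 *
 *	sc_tbi_anstate counts down once per tick; it is armed (set to
 *	2) when a link-state change or /C/ reception is seen, giving
 *	autonegotiation time to settle before we sample the link
 *	again.  Zero means no check is pending.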
2630 */
2631 void
2632 wm_tbi_check_link(struct wm_softc *sc)
2633 {
2634 uint32_t rxcw, ctrl, status;
2635
2636 if (sc->sc_tbi_anstate == 0)
2637 return;
2638 else if (sc->sc_tbi_anstate > 1) {
2639 DPRINTF(WM_DEBUG_LINK,
2640 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2641 sc->sc_tbi_anstate));
2642 sc->sc_tbi_anstate--;
2643 return;
2644 }
2645
2646 sc->sc_tbi_anstate = 0;
2647
2648 rxcw = CSR_READ(sc, WMREG_RXCW);
2649 ctrl = CSR_READ(sc, WMREG_CTRL);
2650 status = CSR_READ(sc, WMREG_STATUS);
2651
2652 if ((status & STATUS_LU) == 0) {
2653 DPRINTF(WM_DEBUG_LINK,
2654 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2655 sc->sc_tbi_linkup = 0;
2656 } else {
2657 DPRINTF(WM_DEBUG_LINK,
2658 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2659 (status & STATUS_FD) ? "FDX" : "HDX"));
2660 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2661 if (status & STATUS_FD)
2662 sc->sc_tctl |=
2663 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2664 else
2665 sc->sc_tctl |=
2666 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2667 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2668 sc->sc_tbi_linkup = 1;
2669 }
2670
2671 wm_tbi_set_linkled(sc);
2672 }
2673
2674 /*
2675 * wm_gmii_reset:
2676 *
2677 * Reset the PHY.
2678 */
2679 void
2680 wm_gmii_reset(struct wm_softc *sc)
2681 {
2682 uint32_t reg;
2683
2684 if (sc->sc_type >= WM_T_82544) {
2685 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2686 delay(20000);
2687
2688 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2689 delay(20000);
2690 } else {
2691 /* The PHY reset pin is active-low. */
2692 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2693 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2694 CTRL_EXT_SWDPIN(4));
2695 reg |= CTRL_EXT_SWDPIO(4);
2696
2697 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2698 delay(10);
2699
2700 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2701 delay(10);
2702
2703 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2704 delay(10);
2705 #if 0
2706 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2707 #endif
2708 }
2709 }
2710
2711 /*
2712 * wm_gmii_mediainit:
2713 *
2714 * Initialize media for use on 1000BASE-T devices.
2715 */
2716 void
2717 wm_gmii_mediainit(struct wm_softc *sc)
2718 {
2719 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2720
2721 /* We have MII. */
2722 sc->sc_flags |= WM_F_HAS_MII;
2723
2724 sc->sc_tipg = TIPG_1000T_DFLT;
2725
2726 /*
2727 * Let the chip set speed/duplex on its own based on
2728 * signals from the PHY.
2729 */
2730 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2731 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2732
2733 /* Initialize our media structures and probe the GMII. */
2734 sc->sc_mii.mii_ifp = ifp;
2735
2736 if (sc->sc_type >= WM_T_82544) {
2737 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2738 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2739 } else {
2740 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2741 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2742 }
2743 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2744
2745 wm_gmii_reset(sc);
2746
2747 ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_gmii_mediachange,
2748 wm_gmii_mediastatus);
2749
2750 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2751 MII_OFFSET_ANY, 0);
2752 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2753 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2754 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2755 } else
2756 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2757 }
2758
2759 /*
2760 * wm_gmii_mediastatus: [ifmedia interface function]
2761 *
2762 * Get the current interface media status on a 1000BASE-T device.
2763 */
2764 void
2765 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2766 {
2767 struct wm_softc *sc = ifp->if_softc;
2768
2769 mii_pollstat(&sc->sc_mii);
2770 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2771 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2772 }
2773
2774 /*
2775 * wm_gmii_mediachange: [ifmedia interface function]
2776 *
2777 * Set hardware to newly-selected media on a 1000BASE-T device.
2778 */
2779 int
2780 wm_gmii_mediachange(struct ifnet *ifp)
2781 {
2782 struct wm_softc *sc = ifp->if_softc;
2783
2784 if (ifp->if_flags & IFF_UP)
2785 mii_mediachg(&sc->sc_mii);
2786 return (0);
2787 }
2788
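/*
 * On the i82543 the MII management interface is bit-banged through
 * software-definable pins in the CTRL register.
 */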
2789 #define MDI_IO CTRL_SWDPIN(2)
2790 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2791 #define MDI_CLK CTRL_SWDPIN(3)
2792
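/*
 * i82543_mii_sendbits:
 *
 *	Clock the most-significant `nbits' of `data' out to the PHY,
 *	MSB first.
 */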
2793 static void
2794 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2795 {
2796 uint32_t i, v;
2797
2798 v = CSR_READ(sc, WMREG_CTRL);
2799 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2800 v |= MDI_DIR | CTRL_SWDPIO(3);
2801
2802 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2803 if (data & i)
2804 v |= MDI_IO;
2805 else
2806 v &= ~MDI_IO;
2807 CSR_WRITE(sc, WMREG_CTRL, v);
2808 delay(10);
2809 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2810 delay(10);
2811 CSR_WRITE(sc, WMREG_CTRL, v);
2812 delay(10);
2813 }
2814 }
2815
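/*
 * i82543_mii_recvbits:
 *
 *	Clock 16 bits of data in from the PHY; the extra clock cycles
 *	before and after account for the turnaround/idle bits.
 */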
2816 static uint32_t
2817 i82543_mii_recvbits(struct wm_softc *sc)
2818 {
2819 uint32_t v, i, data = 0;
2820
2821 v = CSR_READ(sc, WMREG_CTRL);
2822 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2823 v |= CTRL_SWDPIO(3);
2824
2825 CSR_WRITE(sc, WMREG_CTRL, v);
2826 delay(10);
2827 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2828 delay(10);
2829 CSR_WRITE(sc, WMREG_CTRL, v);
2830 delay(10);
2831
2832 for (i = 0; i < 16; i++) {
2833 data <<= 1;
2834 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2835 delay(10);
2836 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2837 data |= 1;
2838 CSR_WRITE(sc, WMREG_CTRL, v);
2839 delay(10);
2840 }
2841
2842 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2843 delay(10);
2844 CSR_WRITE(sc, WMREG_CTRL, v);
2845 delay(10);
2846
2847 return (data);
2848 }
2849
2850 #undef MDI_IO
2851 #undef MDI_DIR
2852 #undef MDI_CLK
2853
2854 /*
2855 * wm_gmii_i82543_readreg: [mii interface function]
2856 *
2857 * Read a PHY register on the GMII (i82543 version).
2858 */
2859 int
2860 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2861 {
2862 struct wm_softc *sc = (void *) self;
2863 int rv;
2864
2865 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2866 i82543_mii_sendbits(sc, reg | (phy << 5) |
2867 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2868 rv = i82543_mii_recvbits(sc) & 0xffff;
2869
2870 DPRINTF(WM_DEBUG_GMII,
2871 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2872 sc->sc_dev.dv_xname, phy, reg, rv));
2873
2874 return (rv);
2875 }
2876
2877 /*
2878 * wm_gmii_i82543_writereg: [mii interface function]
2879 *
2880 * Write a PHY register on the GMII (i82543 version).
2881 */
2882 void
2883 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2884 {
2885 struct wm_softc *sc = (void *) self;
2886
2887 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2888 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2889 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2890 (MII_COMMAND_START << 30), 32);
2891 }
2892
2893 /*
2894 * wm_gmii_i82544_readreg: [mii interface function]
2895 *
2896 * Read a PHY register on the GMII.
2897 */
2898 int
2899 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2900 {
2901 struct wm_softc *sc = (void *) self;
2902 uint32_t mdic;
2903 int i, rv;
2904
2905 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2906 MDIC_REGADD(reg));
2907
2908 for (i = 0; i < 100; i++) {
2909 mdic = CSR_READ(sc, WMREG_MDIC);
2910 if (mdic & MDIC_READY)
2911 break;
2912 delay(10);
2913 }
2914
2915 if ((mdic & MDIC_READY) == 0) {
2916 printf("%s: MDIC read timed out: phy %d reg %d\n",
2917 sc->sc_dev.dv_xname, phy, reg);
2918 rv = 0;
2919 } else if (mdic & MDIC_E) {
2920 #if 0 /* This is normal if no PHY is present. */
2921 printf("%s: MDIC read error: phy %d reg %d\n",
2922 sc->sc_dev.dv_xname, phy, reg);
2923 #endif
2924 rv = 0;
2925 } else {
2926 rv = MDIC_DATA(mdic);
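		/* An all-ones read usually means no PHY is present. */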
2927 if (rv == 0xffff)
2928 rv = 0;
2929 }
2930
2931 return (rv);
2932 }
2933
2934 /*
2935 * wm_gmii_i82544_writereg: [mii interface function]
2936 *
2937 * Write a PHY register on the GMII.
2938 */
2939 void
2940 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2941 {
2942 struct wm_softc *sc = (void *) self;
2943 uint32_t mdic;
2944 int i;
2945
2946 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2947 MDIC_REGADD(reg) | MDIC_DATA(val));
2948
2949 for (i = 0; i < 100; i++) {
2950 mdic = CSR_READ(sc, WMREG_MDIC);
2951 if (mdic & MDIC_READY)
2952 break;
2953 delay(10);
2954 }
2955
2956 if ((mdic & MDIC_READY) == 0)
2957 printf("%s: MDIC write timed out: phy %d reg %d\n",
2958 sc->sc_dev.dv_xname, phy, reg);
2959 else if (mdic & MDIC_E)
2960 printf("%s: MDIC write error: phy %d reg %d\n",
2961 sc->sc_dev.dv_xname, phy, reg);
2962 }
2963
2964 /*
2965 * wm_gmii_statchg: [mii interface function]
2966 *
2967 * Callback from MII layer when media changes.
2968 */
2969 void
2970 wm_gmii_statchg(struct device *self)
2971 {
2972 struct wm_softc *sc = (void *) self;
2973
2974 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2975
2976 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2977 DPRINTF(WM_DEBUG_LINK,
2978 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2979 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2980 } else {
2981 DPRINTF(WM_DEBUG_LINK,
2982 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2983 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2984 }
2985
2986 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2987 }
2988