/*	$NetBSD: if_wm.c,v 1.14 2002/07/14 01:34:00 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix TCP/UDP checksums.
 *	  Status: Several successful transmissions with offloaded
 *	  checksums occur.  After several successful transmissions,
 *	  the chip goes catatonic.  The watchdog timer fires, which
 *	  resets the chip, and gets things moving again, until the
 *	  cycle repeats.
 *
 *	- Make GMII work on the i82543.
 *
 *	- Fix hw VLAN assist.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
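
/*
 * Illustrative sketch (not driver code): because WM_NTXDESC and
 * WM_TXQUEUELEN are powers of two, WM_NEXTTX()/WM_NEXTTXS() advance
 * a ring index with a cheap AND instead of a modulo; for example,
 * WM_NEXTTX(255) == (256 & 0xff) == 0.  A minimal stand-alone
 * demonstration of the same pattern:
 */
#if 0
static int
ring_advance_demo(void)
{
	int idx = WM_NTXDESC - 1;	/* last slot in the ring, 255 */

	idx = WM_NEXTTX(idx);		/* wraps back around to 0 */
	return (idx);
}
#endif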

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
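
/*
 * Worked example of the sizing above (added commentary, not from the
 * original author): an ETHER_MAX_LEN_JUMBO (9018-byte) frame split
 * across 2048-byte (MCLBYTES) buffers needs ceil(9018 / 2048) = 5
 * descriptors, so 256 descriptors hold roughly 256 / 5 = 51 in-flight
 * jumbo packets -- hence "room for 50" after rounding down.
 */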

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
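
/*
 * Illustrative sketch (not driver code): the two macros above keep a
 * "pointer to the tail pointer" so that appending an mbuf to the Rx
 * chain is O(1) and works identically for the empty and non-empty
 * cases.  A minimal usage sequence:
 */
#if 0
static void
rxchain_demo(struct wm_softc *sc, struct mbuf *m)
{

	WM_RXCHAIN_RESET(sc);	/* sc_rxtailp = &sc_rxhead, head = NULL */
	WM_RXCHAIN_LINK(sc, m);	/* sc_rxhead = m; tailp = &m->m_next */
	*sc->sc_rxtailp = NULL;	/* terminate before handing the chain up */
}
#endif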

/* sc_type */
#define	WM_T_82542_2_0	0	/* i82542 2.0 (really old) */
#define	WM_T_82542_2_1	1	/* i82542 2.1+ (old) */
#define	WM_T_82543	2	/* i82543 */
#define	WM_T_82544	3	/* i82544 */
#define	WM_T_82540	4	/* i82540 */
#define	WM_T_82545	5	/* i82545 */
#define	WM_T_82546	6	/* i82546 */

/* sc_flags */
#define	WM_F_HAS_MII	0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
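
/*
 * Worked example (added commentary): because the Tx and Rx rings live
 * in one clump, a descriptor's bus address is just the bus address of
 * the clump plus a structure offset.  For instance:
 *
 *	WM_CDTXADDR(sc, 3)
 *	    == sc_cddma + offsetof(struct wm_control_data, wcd_txdescs[3])
 *	    == sc_cddma + 3 * sizeof(wiseman_txdesc_t)
 *
 * since wcd_txdescs is the first member of the clump.
 */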

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
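
/*
 * Worked example (added commentary): syncing 10 descriptors starting
 * at index 250 of the 256-entry ring wraps, so WM_CDTXSYNC() issues
 * two bus_dmamap_sync() calls: one for descriptors 250-255
 * (WM_NTXDESC - 250 = 6 entries), then, after resetting __x to 0, a
 * second for descriptors 0-3 (the remaining __n = 4 entries).
 */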

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
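
/*
 * Worked example (added commentary) of the 2-byte "scoot" above: an
 * Ethernet header is 14 bytes, so with the frame starting at offset 0
 * the IP header would begin at offset 14, which is only 2-byte
 * aligned.  Starting the frame at offset 2 puts the IP header at
 * offset 16, a 4-byte boundary, which is what the layer 3 code wants
 * on strict-alignment machines.
 */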

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_i82543_readreg(struct device *, int, int);
void	wm_gmii_i82543_writereg(struct device *, int, int, int);

int	wm_gmii_i82544_readreg(struct device *, int, int);
void	wm_gmii_i82544_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (preg < 2) {
			printf("%s: i82542 must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
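
	/*
	 * Worked example (added commentary): the EEPROM stores the MAC
	 * address as three little-endian 16-bit words, so for the
	 * (hypothetical) address 00:a0:c9:12:34:56 the words read back
	 * as myea[0] = 0xa000, myea[1] = 0x12c9, myea[2] = 0x5634, and
	 * the masks and shifts above peel each word into two bytes.
	 */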

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_82544)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_82544) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_82544) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the i82542 and the i82543 and later chips.
	 */
	if (sc->sc_type < WM_T_82543) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on an i82542 < 2.1.
	 */
	if (sc->sc_type >= WM_T_82542_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_82543 ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);
	/*
	 * If we're an i82543 or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_82543)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on i82543 and later.
	 */
	if (sc->sc_type >= WM_T_82543)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	struct ether_header *eh;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		iphl = sizeof(struct ip);
		offset = ETHER_HDR_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		*cmdp = 0;
		return (0);
	}

	/* XXX */
	if (m0->m_len < (offset + iphl)) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	}
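
	/*
	 * Worked example (added commentary): for a plain IPv4 packet
	 * with a 20-byte header, offset == ETHER_HDR_LEN == 14, so the
	 * context fields above come out as IPCSS = 14 (checksum start),
	 * IPCSO = 14 + offsetof(struct ip, ip_sum) = 24 (where the
	 * result is stored), and IPCSE = 14 + 20 - 1 = 33 (last byte
	 * covered by the checksum).
	 */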

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	}

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
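
/*
 * Design note on the context cache above (added commentary, not from
 * the original author): the chip retains the most recently loaded
 * checksum context, so a context descriptor only needs to be queued
 * when the header offsets actually change.  sc_txctx_ipcs and
 * sc_txctx_tucs are seeded with the impossible value 0xffffffff in
 * wm_init(), which forces the first packet after (re)initialization
 * down the "txctx init" path to load a fresh context.
 */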

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}
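
		/*
		 * Worked example (added commentary): with all 256
		 * descriptors free, a packet mapped into the maximum
		 * 16 segments passes the check above (16 <= 256 - 2);
		 * once sc_txfree drops to 17 or below, the same packet
		 * stalls here until wm_txintr() reclaims completed
		 * descriptors.
		 */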

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
					&cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
1461 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1462 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1463 txs = &sc->sc_txsoft[i];
1464
1465 DPRINTF(WM_DEBUG_TX,
1466 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1467
1468 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1469 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1470
1471 status = le32toh(sc->sc_txdescs[
1472 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1473 if ((status & WTX_ST_DD) == 0)
1474 break;
1475
1476 DPRINTF(WM_DEBUG_TX,
1477 ("%s: TX: job %d done: descs %d..%d\n",
1478 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1479 txs->txs_lastdesc));
1480
1481 /*
1482 * XXX We should probably be using the statistics
1483 * XXX registers, but I don't know if they exist
1484 * XXX on chips before the i82544.
1485 */
1486
1487 #ifdef WM_EVENT_COUNTERS
1488 if (status & WTX_ST_TU)
1489 WM_EVCNT_INCR(&sc->sc_ev_tu);
1490 #endif /* WM_EVENT_COUNTERS */
1491
1492 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1493 ifp->if_oerrors++;
1494 if (status & WTX_ST_LC)
1495 printf("%s: late collision\n",
1496 sc->sc_dev.dv_xname);
1497 else if (status & WTX_ST_EC) {
1498 ifp->if_collisions += 16;
1499 printf("%s: excessive collisions\n",
1500 sc->sc_dev.dv_xname);
1501 }
1502 } else
1503 ifp->if_opackets++;
1504
1505 sc->sc_txfree += txs->txs_ndesc;
1506 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1507 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1508 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1509 m_freem(txs->txs_mbuf);
1510 txs->txs_mbuf = NULL;
1511 }
1512
1513 /* Update the dirty transmit buffer pointer. */
1514 sc->sc_txsdirty = i;
1515 DPRINTF(WM_DEBUG_TX,
1516 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1517
1518 /*
1519 * If there are no more pending transmissions, cancel the watchdog
1520 * timer.
1521 */
1522 if (sc->sc_txsfree == WM_TXQUEUELEN)
1523 ifp->if_timer = 0;
1524 }
1525
1526 /*
1527 * wm_rxintr:
1528 *
1529 * Helper; handle receive interrupts.
1530 */
1531 void
1532 wm_rxintr(struct wm_softc *sc)
1533 {
1534 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1535 struct wm_rxsoft *rxs;
1536 struct mbuf *m;
1537 int i, len;
1538 uint8_t status, errors;
1539
1540 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1541 rxs = &sc->sc_rxsoft[i];
1542
1543 DPRINTF(WM_DEBUG_RX,
1544 ("%s: RX: checking descriptor %d\n",
1545 sc->sc_dev.dv_xname, i));
1546
1547 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1548
1549 status = sc->sc_rxdescs[i].wrx_status;
1550 errors = sc->sc_rxdescs[i].wrx_errors;
1551 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1552
1553 if ((status & WRX_ST_DD) == 0) {
1554 /*
1555 * We have processed all of the receive descriptors.
1556 */
1557 break;
1558 }
1559
1560 if (__predict_false(sc->sc_rxdiscard)) {
1561 DPRINTF(WM_DEBUG_RX,
1562 ("%s: RX: discarding contents of descriptor %d\n",
1563 sc->sc_dev.dv_xname, i));
1564 WM_INIT_RXDESC(sc, i);
1565 if (status & WRX_ST_EOP) {
1566 /* Reset our state. */
1567 DPRINTF(WM_DEBUG_RX,
1568 ("%s: RX: resetting rxdiscard -> 0\n",
1569 sc->sc_dev.dv_xname));
1570 sc->sc_rxdiscard = 0;
1571 }
1572 continue;
1573 }
1574
1575 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1576 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1577
1578 m = rxs->rxs_mbuf;
1579
1580 /*
1581 * Add a new receive buffer to the ring.
1582 */
1583 if (wm_add_rxbuf(sc, i) != 0) {
1584 /*
1585 * Failed, throw away what we've done so
1586 * far, and discard the rest of the packet.
1587 */
1588 ifp->if_ierrors++;
1589 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1590 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1591 WM_INIT_RXDESC(sc, i);
1592 if ((status & WRX_ST_EOP) == 0)
1593 sc->sc_rxdiscard = 1;
1594 if (sc->sc_rxhead != NULL)
1595 m_freem(sc->sc_rxhead);
1596 WM_RXCHAIN_RESET(sc);
1597 DPRINTF(WM_DEBUG_RX,
1598 ("%s: RX: Rx buffer allocation failed, "
1599 "dropping packet%s\n", sc->sc_dev.dv_xname,
1600 sc->sc_rxdiscard ? " (discard)" : ""));
1601 continue;
1602 }
1603
1604 WM_RXCHAIN_LINK(sc, m);
1605
1606 m->m_len = len;
1607
1608 DPRINTF(WM_DEBUG_RX,
1609 ("%s: RX: buffer at %p len %d\n",
1610 sc->sc_dev.dv_xname, m->m_data, len));
1611
1612 /*
1613 * If this is not the end of the packet, keep
1614 * looking.
1615 */
1616 if ((status & WRX_ST_EOP) == 0) {
1617 sc->sc_rxlen += len;
1618 DPRINTF(WM_DEBUG_RX,
1619 ("%s: RX: not yet EOP, rxlen -> %d\n",
1620 sc->sc_dev.dv_xname, sc->sc_rxlen));
1621 continue;
1622 }
1623
1624 /*
1625 * Okay, we have the entire packet now...
1626 */
1627 *sc->sc_rxtailp = NULL;
1628 m = sc->sc_rxhead;
1629 len += sc->sc_rxlen;
1630
1631 WM_RXCHAIN_RESET(sc);
1632
1633 DPRINTF(WM_DEBUG_RX,
1634 ("%s: RX: have entire packet, len -> %d\n",
1635 sc->sc_dev.dv_xname, len));
1636
1637 /*
1638 * If an error occurred, update stats and drop the packet.
1639 */
1640 if (errors &
1641 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1642 ifp->if_ierrors++;
1643 if (errors & WRX_ER_SE)
1644 printf("%s: symbol error\n",
1645 sc->sc_dev.dv_xname);
1646 else if (errors & WRX_ER_SEQ)
1647 printf("%s: receive sequence error\n",
1648 sc->sc_dev.dv_xname);
1649 else if (errors & WRX_ER_CE)
1650 printf("%s: CRC error\n",
1651 sc->sc_dev.dv_xname);
1652 m_freem(m);
1653 continue;
1654 }
1655
1656 /*
1657 * No errors. Receive the packet.
1658 *
1659 * Note, we have configured the chip to include the
1660 * CRC with every packet.
1661 */
1662 m->m_flags |= M_HASFCS;
1663 m->m_pkthdr.rcvif = ifp;
1664 m->m_pkthdr.len = len;
1665
1666 #if 0 /* XXXJRT */
1667 /*
1668 * If VLANs are enabled, VLAN packets have been unwrapped
1669 * for us. Associate the tag with the packet.
1670 */
1671 if (sc->sc_ethercom.ec_nvlans != 0 &&
1672 (status & WRX_ST_VP) != 0) {
1673 struct mbuf *vtag;
1674
1675 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1676 if (vtag == NULL) {
1677 ifp->if_ierrors++;
1678 printf("%s: unable to allocate VLAN tag\n",
1679 sc->sc_dev.dv_xname);
1680 m_freem(m);
1681 continue;
1682 }
1683
1684 *mtod(m, int *) =
1685 le16toh(sc->sc_rxdescs[i].wrx_special);
1686 vtag->m_len = sizeof(int);
1687 }
1688 #endif /* XXXJRT */
1689
1690 /*
1691 * Set up checksum info for this packet.
1692 */
1693 if (status & WRX_ST_IPCS) {
1694 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1695 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1696 if (errors & WRX_ER_IPE)
1697 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1698 }
1699 if (status & WRX_ST_TCPCS) {
1700 /*
1701 * Note: we don't know if this was TCP or UDP,
1702 * so we just set both bits, and expect the
1703 * upper layers to deal.
1704 */
1705 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1706 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1707 if (errors & WRX_ER_TCPE)
1708 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1709 }
1710
1711 ifp->if_ipackets++;
1712
1713 #if NBPFILTER > 0
1714 /* Pass this up to any BPF listeners. */
1715 if (ifp->if_bpf)
1716 bpf_mtap(ifp->if_bpf, m);
1717 #endif /* NBPFILTER > 0 */
1718
1719 /* Pass it on. */
1720 (*ifp->if_input)(ifp, m);
1721 }
1722
1723 /* Update the receive pointer. */
1724 sc->sc_rxptr = i;
1725
1726 DPRINTF(WM_DEBUG_RX,
1727 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1728 }
1729
1730 /*
1731 * wm_linkintr:
1732 *
1733 * Helper; handle link interrupts.
1734 */
1735 void
1736 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1737 {
1738 uint32_t status;
1739
1740 /*
1741 * If we get a link status interrupt on a 1000BASE-T
1742 * device, just fall into the normal MII tick path.
1743 */
1744 if (sc->sc_flags & WM_F_HAS_MII) {
1745 if (icr & ICR_LSC) {
1746 DPRINTF(WM_DEBUG_LINK,
1747 ("%s: LINK: LSC -> mii_tick\n",
1748 sc->sc_dev.dv_xname));
1749 mii_tick(&sc->sc_mii);
1750 } else if (icr & ICR_RXSEQ) {
1751 DPRINTF(WM_DEBUG_LINK,
1752 ("%s: LINK Receive sequence error\n",
1753 sc->sc_dev.dv_xname));
1754 }
1755 return;
1756 }
1757
1758 /*
1759 * If we are now receiving /C/, check for link again in
1760 * a couple of link clock ticks.
1761 */
1762 if (icr & ICR_RXCFG) {
1763 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1764 sc->sc_dev.dv_xname));
1765 sc->sc_tbi_anstate = 2;
1766 }
1767
1768 if (icr & ICR_LSC) {
1769 status = CSR_READ(sc, WMREG_STATUS);
1770 if (status & STATUS_LU) {
1771 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1772 sc->sc_dev.dv_xname,
1773 (status & STATUS_FD) ? "FDX" : "HDX"));
1774 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1775 if (status & STATUS_FD)
1776 sc->sc_tctl |=
1777 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1778 else
1779 sc->sc_tctl |=
1780 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1781 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1782 sc->sc_tbi_linkup = 1;
1783 } else {
1784 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1785 sc->sc_dev.dv_xname));
1786 sc->sc_tbi_linkup = 0;
1787 }
1788 sc->sc_tbi_anstate = 2;
1789 wm_tbi_set_linkled(sc);
1790 } else if (icr & ICR_RXSEQ) {
1791 DPRINTF(WM_DEBUG_LINK,
1792 ("%s: LINK: Receive sequence error\n",
1793 sc->sc_dev.dv_xname));
1794 }
1795 }
1796
1797 /*
1798 * wm_tick:
1799 *
1800 * One second timer, used to check link status, sweep up
1801 * completed transmit jobs, etc.
1802 */
1803 void
1804 wm_tick(void *arg)
1805 {
1806 struct wm_softc *sc = arg;
1807 int s;
1808
1809 s = splnet();
1810
1811 if (sc->sc_flags & WM_F_HAS_MII)
1812 mii_tick(&sc->sc_mii);
1813 else
1814 wm_tbi_check_link(sc);
1815
1816 splx(s);
1817
1818 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1819 }
1820
1821 /*
1822 * wm_reset:
1823 *
1824 * Reset the i82542 chip.
1825 */
1826 void
1827 wm_reset(struct wm_softc *sc)
1828 {
1829 int i;
1830
1831 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1832 delay(10000);
1833
1834 for (i = 0; i < 1000; i++) {
1835 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1836 return;
1837 delay(20);
1838 }
1839
1840 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1841 printf("%s: WARNING: reset failed to complete\n",
1842 sc->sc_dev.dv_xname);
1843 }
1844
1845 /*
1846 * wm_init: [ifnet interface function]
1847 *
1848 * Initialize the interface. Must be called at splnet().
1849 */
1850 int
1851 wm_init(struct ifnet *ifp)
1852 {
1853 struct wm_softc *sc = ifp->if_softc;
1854 struct wm_rxsoft *rxs;
1855 int i, error = 0;
1856 uint32_t reg;
1857
1858 /* Cancel any pending I/O. */
1859 wm_stop(ifp, 0);
1860
1861 /* Reset the chip to a known state. */
1862 wm_reset(sc);
1863
1864 /* Initialize the transmit descriptor ring. */
1865 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1866 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1867 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1868 sc->sc_txfree = WM_NTXDESC;
1869 sc->sc_txnext = 0;
1870
1871 sc->sc_txctx_ipcs = 0xffffffff;
1872 sc->sc_txctx_tucs = 0xffffffff;
1873
1874 if (sc->sc_type < WM_T_82543) {
1875 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1876 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1877 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1878 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1879 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1880 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1881 } else {
1882 CSR_WRITE(sc, WMREG_TBDAH, 0);
1883 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1884 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1885 CSR_WRITE(sc, WMREG_TDH, 0);
1886 CSR_WRITE(sc, WMREG_TDT, 0);
1887 CSR_WRITE(sc, WMREG_TIDV, 128);
1888
1889 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1890 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1891 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1892 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1893 }
1894 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1895 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1896
1897 /* Initialize the transmit job descriptors. */
1898 for (i = 0; i < WM_TXQUEUELEN; i++)
1899 sc->sc_txsoft[i].txs_mbuf = NULL;
1900 sc->sc_txsfree = WM_TXQUEUELEN;
1901 sc->sc_txsnext = 0;
1902 sc->sc_txsdirty = 0;
1903
1904 /*
1905 * Initialize the receive descriptor and receive job
1906 * descriptor rings.
1907 */
1908 if (sc->sc_type < WM_T_82543) {
1909 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1910 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1911 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1912 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1913 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1914 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1915
1916 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1917 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1918 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1919 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1920 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1921 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1922 } else {
1923 CSR_WRITE(sc, WMREG_RDBAH, 0);
1924 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1925 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1926 CSR_WRITE(sc, WMREG_RDH, 0);
1927 CSR_WRITE(sc, WMREG_RDT, 0);
1928 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1929 }
1930 for (i = 0; i < WM_NRXDESC; i++) {
1931 rxs = &sc->sc_rxsoft[i];
1932 if (rxs->rxs_mbuf == NULL) {
1933 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1934 printf("%s: unable to allocate or map rx "
1935 "buffer %d, error = %d\n",
1936 sc->sc_dev.dv_xname, i, error);
1937 /*
1938 * XXX Should attempt to run with fewer receive
1939 * XXX buffers instead of just failing.
1940 */
1941 wm_rxdrain(sc);
1942 goto out;
1943 }
1944 } else
1945 WM_INIT_RXDESC(sc, i);
1946 }
1947 sc->sc_rxptr = 0;
1948 sc->sc_rxdiscard = 0;
1949 WM_RXCHAIN_RESET(sc);
1950
1951 /*
1952 * Clear out the VLAN table -- we don't use it (yet).
1953 */
1954 CSR_WRITE(sc, WMREG_VET, 0);
1955 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1956 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1957
1958 /*
1959 * Set up flow-control parameters.
1960 *
1961 * XXX Values could probably stand some tuning.
1962 */
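	/*
	 * FCAL_CONST/FCAH_CONST presumably encode the 802.3x PAUSE
	 * multicast address (01:80:c2:00:00:01) and
	 * ETHERTYPE_FLOWCONTROL the MAC control ethertype (0x8808);
	 * FCRTH/FCRTL are the FIFO high/low water marks and FCTTV
	 * the timer value sent in XOFF frames.
	 */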
1963 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1964 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1965 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1966 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1967
1968 if (sc->sc_type < WM_T_82543) {
1969 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1970 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1971 } else {
1972 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1973 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1974 }
1975 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1976 }
1977
1978 #if 0 /* XXXJRT */
1979 /* Deal with VLAN enables. */
1980 if (sc->sc_ethercom.ec_nvlans != 0)
1981 sc->sc_ctrl |= CTRL_VME;
1982 else
1983 #endif /* XXXJRT */
1984 sc->sc_ctrl &= ~CTRL_VME;
1985
1986 /* Write the control registers. */
1987 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1988 #if 0
1989 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1990 #endif
1991
1992 /*
1993 * Set up checksum offload parameters.
1994 */
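	/*
	 * Note: TCP/UDP offload (RXCSUM_TUOFL) is only ever enabled
	 * below together with IP offload (RXCSUM_IPOFL); the receive
	 * checksum unit apparently needs IPOFL set for the TCP/UDP
	 * logic to operate.
	 */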
1995 reg = CSR_READ(sc, WMREG_RXCSUM);
1996 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1997 reg |= RXCSUM_IPOFL;
1998 else
1999 reg &= ~RXCSUM_IPOFL;
2000 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
2001 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
2002 else {
2003 reg &= ~RXCSUM_TUOFL;
2004 if ((ifp->if_capenable & IFCAP_CSUM_IPv4) == 0)
2005 reg &= ~RXCSUM_IPOFL;
2006 }
2007 CSR_WRITE(sc, WMREG_RXCSUM, reg);
2008
2009 /*
2010 * Set up the interrupt registers.
2011 */
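	/*
	 * The sources enabled below are, roughly: TXDW = tx descriptor
	 * written back, LSC = link status change, RXSEQ = receive
	 * sequence error, RXDMT0 = rx descriptors below minimum
	 * threshold, RXO = rx FIFO overrun, RXT0 = rx timer expired.
	 * RXCFG (/C/ ordered sets received) only matters on the
	 * MII-less 1000BASE-X parts.
	 */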
2012 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2013 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2014 ICR_RXO | ICR_RXT0;
2015 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2016 sc->sc_icr |= ICR_RXCFG;
2017 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2018
2019 /* Set up the inter-packet gap. */
2020 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2021
2022 #if 0 /* XXXJRT */
2023 /* Set the VLAN ethernetype. */
2024 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2025 #endif
2026
2027 /*
2028 * Set up the transmit control register; we start out with
2029 	 * a collision distance suitable for FDX, but update it when
2030 * we resolve the media type.
2031 */
2032 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2033 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2034 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2035
2036 /* Set the media. */
2037 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2038
2039 /*
2040 * Set up the receive control register; we actually program
2041 * the register when we set the receive filter. Use multicast
2042 * address offset type 0.
2043 *
2044 * Only the i82544 has the ability to strip the incoming
2045 * CRC, so we don't enable that feature.
2046 */
2047 sc->sc_mchash_type = 0;
2048 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2049 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
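	/*
	 * Roughly: LBM_NONE = no loopback, RDMTS_1_2 = rx descriptor
	 * minimum threshold at 1/2 the ring, 2k = 2 KB receive
	 * buffers, DPF = discard PAUSE frames, MO = multicast offset
	 * type.
	 */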
2050
2051 /* Set the receive filter. */
2052 wm_set_filter(sc);
2053
2054 /* Start the one second link check clock. */
2055 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2056
2057 /* ...all done! */
2058 ifp->if_flags |= IFF_RUNNING;
2059 ifp->if_flags &= ~IFF_OACTIVE;
2060
2061 out:
2062 if (error)
2063 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2064 return (error);
2065 }
2066
2067 /*
2068 * wm_rxdrain:
2069 *
2070 * Drain the receive queue.
2071 */
2072 void
2073 wm_rxdrain(struct wm_softc *sc)
2074 {
2075 struct wm_rxsoft *rxs;
2076 int i;
2077
2078 for (i = 0; i < WM_NRXDESC; i++) {
2079 rxs = &sc->sc_rxsoft[i];
2080 if (rxs->rxs_mbuf != NULL) {
2081 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2082 m_freem(rxs->rxs_mbuf);
2083 rxs->rxs_mbuf = NULL;
2084 }
2085 }
2086 }
2087
2088 /*
2089 * wm_stop: [ifnet interface function]
2090 *
2091 * Stop transmission on the interface.
2092 */
2093 void
2094 wm_stop(struct ifnet *ifp, int disable)
2095 {
2096 struct wm_softc *sc = ifp->if_softc;
2097 struct wm_txsoft *txs;
2098 int i;
2099
2100 /* Stop the one second clock. */
2101 callout_stop(&sc->sc_tick_ch);
2102
2103 if (sc->sc_flags & WM_F_HAS_MII) {
2104 /* Down the MII. */
2105 mii_down(&sc->sc_mii);
2106 }
2107
2108 /* Stop the transmit and receive processes. */
2109 CSR_WRITE(sc, WMREG_TCTL, 0);
2110 CSR_WRITE(sc, WMREG_RCTL, 0);
2111
2112 /* Release any queued transmit buffers. */
2113 for (i = 0; i < WM_TXQUEUELEN; i++) {
2114 txs = &sc->sc_txsoft[i];
2115 if (txs->txs_mbuf != NULL) {
2116 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2117 m_freem(txs->txs_mbuf);
2118 txs->txs_mbuf = NULL;
2119 }
2120 }
2121
2122 if (disable)
2123 wm_rxdrain(sc);
2124
2125 /* Mark the interface as down and cancel the watchdog timer. */
2126 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2127 ifp->if_timer = 0;
2128 }
2129
2130 /*
2131 * wm_read_eeprom:
2132 *
2133 * Read data from the serial EEPROM.
2134 */
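/*
 * The EEPROM is a Microwire (three-wire) part bit-banged through the
 * EECD register: each word is fetched by asserting chip select,
 * shifting out a 3-bit READ opcode and a 6-bit word address on DI
 * (MSB first), then clocking 16 data bits back in on DO.
 */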
2135 void
2136 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2137 {
2138 uint32_t reg;
2139 int i, x;
2140
2141 for (i = 0; i < wordcnt; i++) {
2142 /* Send CHIP SELECT for one clock tick. */
2143 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2144 delay(2);
2145
2146 /* Shift in the READ command. */
2147 for (x = 3; x > 0; x--) {
2148 reg = EECD_CS;
2149 if (UWIRE_OPC_READ & (1 << (x - 1)))
2150 reg |= EECD_DI;
2151 CSR_WRITE(sc, WMREG_EECD, reg);
2152 delay(2);
2153 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2154 delay(2);
2155 CSR_WRITE(sc, WMREG_EECD, reg);
2156 delay(2);
2157 }
2158
2159 /* Shift in address. */
2160 for (x = 6; x > 0; x--) {
2161 reg = EECD_CS;
2162 if ((word + i) & (1 << (x - 1)))
2163 reg |= EECD_DI;
2164 CSR_WRITE(sc, WMREG_EECD, reg);
2165 delay(2);
2166 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2167 delay(2);
2168 CSR_WRITE(sc, WMREG_EECD, reg);
2169 delay(2);
2170 }
2171
2172 /* Shift out the data. */
2173 reg = EECD_CS;
2174 data[i] = 0;
2175 for (x = 16; x > 0; x--) {
2176 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2177 delay(2);
2178 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2179 data[i] |= (1 << (x - 1));
2180 CSR_WRITE(sc, WMREG_EECD, reg);
2181 delay(2);
2182 }
2183
2184 /* Clear CHIP SELECT. */
2185 CSR_WRITE(sc, WMREG_EECD, 0);
2186 }
2187 }
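/*
 * A hedged usage sketch (not part of the driver): how a caller might
 * fetch the station address with wm_read_eeprom(), assuming it lives
 * in EEPROM words 0-2 in the usual i8254x little-endian byte order.
 * The function name below is hypothetical.
 */
#if 0 /* illustrative only -- not compiled */
static void
wm_example_fetch_enaddr(struct wm_softc *sc, uint8_t *enaddr)
{
	uint16_t myea[3];
	int i;

	wm_read_eeprom(sc, 0, 3, myea);
	for (i = 0; i < 3; i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = myea[i] >> 8;
	}
}
#endif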
2188
2189 /*
2190 * wm_add_rxbuf:
2191 *
2192  *	Add a receive buffer to the indicated descriptor.
2193 */
2194 int
2195 wm_add_rxbuf(struct wm_softc *sc, int idx)
2196 {
2197 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2198 struct mbuf *m;
2199 int error;
2200
2201 MGETHDR(m, M_DONTWAIT, MT_DATA);
2202 if (m == NULL)
2203 return (ENOBUFS);
2204
2205 MCLGET(m, M_DONTWAIT);
2206 if ((m->m_flags & M_EXT) == 0) {
2207 m_freem(m);
2208 return (ENOBUFS);
2209 }
2210
2211 if (rxs->rxs_mbuf != NULL)
2212 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2213
2214 rxs->rxs_mbuf = m;
2215
2216 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2217 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2218 BUS_DMA_READ|BUS_DMA_NOWAIT);
2219 if (error) {
2220 printf("%s: unable to load rx DMA map %d, error = %d\n",
2221 sc->sc_dev.dv_xname, idx, error);
2222 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2223 }
2224
2225 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2226 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2227
2228 WM_INIT_RXDESC(sc, idx);
2229
2230 return (0);
2231 }
2232
2233 /*
2234 * wm_set_ral:
2235 *
2236  *	Set an entry in the receive address list.
2237 */
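/*
 * Example: for station address 00:a0:c9:12:34:56 this computes
 * ral_lo = 0x12c9a000 and ral_hi = 0x5634 | RAL_AV -- the first four
 * bytes little-endian in the low register, the last two (plus the
 * address-valid bit) in the high one.
 */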
2238 static void
2239 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2240 {
2241 uint32_t ral_lo, ral_hi;
2242
2243 if (enaddr != NULL) {
2244 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2245 (enaddr[3] << 24);
2246 ral_hi = enaddr[4] | (enaddr[5] << 8);
2247 ral_hi |= RAL_AV;
2248 } else {
2249 ral_lo = 0;
2250 ral_hi = 0;
2251 }
2252
2253 if (sc->sc_type >= WM_T_82544) {
2254 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2255 ral_lo);
2256 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2257 ral_hi);
2258 } else {
2259 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2260 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2261 }
2262 }
2263
2264 /*
2265 * wm_mchash:
2266 *
2267 * Compute the hash of the multicast address for the 4096-bit
2268 * multicast filter.
2269 */
2270 static uint32_t
2271 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2272 {
2273 static const int lo_shift[4] = { 4, 3, 2, 0 };
2274 static const int hi_shift[4] = { 4, 5, 6, 8 };
2275 uint32_t hash;
2276
2277 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2278 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2279
2280 return (hash & 0xfff);
2281 }
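/*
 * Example: with the default filter type 0, the all-hosts group
 * 01:00:5e:00:00:01 hashes to ((0x00 >> 4) | (0x01 << 4)) & 0xfff =
 * 0x010, i.e. bit 16 of multicast table word 0.
 */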
2282
2283 /*
2284 * wm_set_filter:
2285 *
2286 * Set up the receive filter.
2287 */
2288 void
2289 wm_set_filter(struct wm_softc *sc)
2290 {
2291 struct ethercom *ec = &sc->sc_ethercom;
2292 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2293 struct ether_multi *enm;
2294 struct ether_multistep step;
2295 bus_addr_t mta_reg;
2296 uint32_t hash, reg, bit;
2297 int i;
2298
2299 if (sc->sc_type >= WM_T_82544)
2300 mta_reg = WMREG_CORDOVA_MTA;
2301 else
2302 mta_reg = WMREG_MTA;
2303
2304 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2305
2306 if (ifp->if_flags & IFF_BROADCAST)
2307 sc->sc_rctl |= RCTL_BAM;
2308 if (ifp->if_flags & IFF_PROMISC) {
2309 sc->sc_rctl |= RCTL_UPE;
2310 goto allmulti;
2311 }
2312
2313 /*
2314 * Set the station address in the first RAL slot, and
2315 * clear the remaining slots.
2316 */
2317 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2318 for (i = 1; i < WM_RAL_TABSIZE; i++)
2319 wm_set_ral(sc, NULL, i);
2320
2321 /* Clear out the multicast table. */
2322 for (i = 0; i < WM_MC_TABSIZE; i++)
2323 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2324
2325 ETHER_FIRST_MULTI(step, ec, enm);
2326 while (enm != NULL) {
2327 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2328 /*
2329 * We must listen to a range of multicast addresses.
2330 * For now, just accept all multicasts, rather than
2331 * trying to set only those filter bits needed to match
2332 * the range. (At this time, the only use of address
2333 * ranges is for IP multicast routing, for which the
2334 * range is big enough to require all bits set.)
2335 */
2336 goto allmulti;
2337 }
2338
2339 hash = wm_mchash(sc, enm->enm_addrlo);
2340
2341 reg = (hash >> 5) & 0x7f;
2342 bit = hash & 0x1f;
2343
2344 hash = CSR_READ(sc, mta_reg + (reg << 2));
2345 hash |= 1U << bit;
2346
2347 /* XXX Hardware bug?? */
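		/*
		 * Presumably the i82544 erratum where an MTA write at
		 * certain offsets does not post unless the neighboring
		 * entry is rewritten immediately afterwards.  Note
		 * that (reg & 0xe) can never compare equal to 1, so as
		 * written this path is never taken; (reg & 1) may have
		 * been intended.
		 */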
2348 if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
2349 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2350 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2351 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2352 } else
2353 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2354
2355 ETHER_NEXT_MULTI(step, enm);
2356 }
2357
2358 ifp->if_flags &= ~IFF_ALLMULTI;
2359 goto setit;
2360
2361 allmulti:
2362 ifp->if_flags |= IFF_ALLMULTI;
2363 sc->sc_rctl |= RCTL_MPE;
2364
2365 setit:
2366 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2367 }
2368
2369 /*
2370 * wm_tbi_mediainit:
2371 *
2372 * Initialize media for use on 1000BASE-X devices.
2373 */
2374 void
2375 wm_tbi_mediainit(struct wm_softc *sc)
2376 {
2377 const char *sep = "";
2378
2379 if (sc->sc_type < WM_T_82543)
2380 sc->sc_tipg = TIPG_WM_DFLT;
2381 else
2382 sc->sc_tipg = TIPG_LG_DFLT;
2383
2384 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2385 wm_tbi_mediastatus);
2386
2387 /*
2388 * SWD Pins:
2389 *
2390 * 0 = Link LED (output)
2391 * 1 = Loss Of Signal (input)
2392 */
2393 sc->sc_ctrl |= CTRL_SWDPIO(0);
2394 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2395
2396 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2397
2398 #define ADD(s, m, d) \
2399 do { \
2400 printf("%s%s", sep, s); \
2401 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2402 sep = ", "; \
2403 } while (/*CONSTCOND*/0)
2404
2405 printf("%s: ", sc->sc_dev.dv_xname);
2406 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2407 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2408 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2409 printf("\n");
2410
2411 #undef ADD
2412
2413 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2414 }
2415
2416 /*
2417 * wm_tbi_mediastatus: [ifmedia interface function]
2418 *
2419 * Get the current interface media status on a 1000BASE-X device.
2420 */
2421 void
2422 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2423 {
2424 struct wm_softc *sc = ifp->if_softc;
2425
2426 ifmr->ifm_status = IFM_AVALID;
2427 ifmr->ifm_active = IFM_ETHER;
2428
2429 if (sc->sc_tbi_linkup == 0) {
2430 ifmr->ifm_active |= IFM_NONE;
2431 return;
2432 }
2433
2434 ifmr->ifm_status |= IFM_ACTIVE;
2435 ifmr->ifm_active |= IFM_1000_SX;
2436 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2437 ifmr->ifm_active |= IFM_FDX;
2438 }
2439
2440 /*
2441 * wm_tbi_mediachange: [ifmedia interface function]
2442 *
2443 * Set hardware to newly-selected media on a 1000BASE-X device.
2444 */
2445 int
2446 wm_tbi_mediachange(struct ifnet *ifp)
2447 {
2448 struct wm_softc *sc = ifp->if_softc;
2449 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2450 uint32_t status;
2451 int i;
2452
2453 sc->sc_txcw = ife->ifm_data;
2454 if (sc->sc_ctrl & CTRL_RFCE)
2455 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2456 if (sc->sc_ctrl & CTRL_TFCE)
2457 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2458 sc->sc_txcw |= TXCW_ANE;
2459
2460 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2461 delay(10000);
2462
2463 sc->sc_tbi_anstate = 0;
2464
2465 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2466 /* Have signal; wait for the link to come up. */
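		/* The loop below waits up to ~500 ms (50 x 10 ms). */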
2467 for (i = 0; i < 50; i++) {
2468 delay(10000);
2469 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2470 break;
2471 }
2472
2473 status = CSR_READ(sc, WMREG_STATUS);
2474 if (status & STATUS_LU) {
2475 /* Link is up. */
2476 DPRINTF(WM_DEBUG_LINK,
2477 ("%s: LINK: set media -> link up %s\n",
2478 sc->sc_dev.dv_xname,
2479 (status & STATUS_FD) ? "FDX" : "HDX"));
2480 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2481 if (status & STATUS_FD)
2482 sc->sc_tctl |=
2483 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2484 else
2485 sc->sc_tctl |=
2486 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2487 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2488 sc->sc_tbi_linkup = 1;
2489 } else {
2490 /* Link is down. */
2491 DPRINTF(WM_DEBUG_LINK,
2492 ("%s: LINK: set media -> link down\n",
2493 sc->sc_dev.dv_xname));
2494 sc->sc_tbi_linkup = 0;
2495 }
2496 } else {
2497 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2498 sc->sc_dev.dv_xname));
2499 sc->sc_tbi_linkup = 0;
2500 }
2501
2502 wm_tbi_set_linkled(sc);
2503
2504 return (0);
2505 }
2506
2507 /*
2508 * wm_tbi_set_linkled:
2509 *
2510 * Update the link LED on 1000BASE-X devices.
2511 */
2512 void
2513 wm_tbi_set_linkled(struct wm_softc *sc)
2514 {
2515
2516 if (sc->sc_tbi_linkup)
2517 sc->sc_ctrl |= CTRL_SWDPIN(0);
2518 else
2519 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2520
2521 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2522 }
2523
2524 /*
2525 * wm_tbi_check_link:
2526 *
2527 * Check the link on 1000BASE-X devices.
2528 */
2529 void
2530 wm_tbi_check_link(struct wm_softc *sc)
2531 {
2532 uint32_t rxcw, ctrl, status;
2533
2534 if (sc->sc_tbi_anstate == 0)
2535 return;
2536 else if (sc->sc_tbi_anstate > 1) {
2537 DPRINTF(WM_DEBUG_LINK,
2538 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2539 sc->sc_tbi_anstate));
2540 sc->sc_tbi_anstate--;
2541 return;
2542 }
2543
2544 sc->sc_tbi_anstate = 0;
2545
2546 rxcw = CSR_READ(sc, WMREG_RXCW);
2547 ctrl = CSR_READ(sc, WMREG_CTRL);
2548 status = CSR_READ(sc, WMREG_STATUS);
2549
2550 if ((status & STATUS_LU) == 0) {
2551 DPRINTF(WM_DEBUG_LINK,
2552 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2553 sc->sc_tbi_linkup = 0;
2554 } else {
2555 DPRINTF(WM_DEBUG_LINK,
2556 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2557 (status & STATUS_FD) ? "FDX" : "HDX"));
2558 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2559 if (status & STATUS_FD)
2560 sc->sc_tctl |=
2561 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2562 else
2563 sc->sc_tctl |=
2564 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2565 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2566 sc->sc_tbi_linkup = 1;
2567 }
2568
2569 wm_tbi_set_linkled(sc);
2570 }
2571
2572 /*
2573 * wm_gmii_reset:
2574 *
2575 * Reset the PHY.
2576 */
2577 void
2578 wm_gmii_reset(struct wm_softc *sc)
2579 {
2580 uint32_t reg;
2581
2582 if (sc->sc_type >= WM_T_82544) {
2583 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2584 delay(20000);
2585
2586 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2587 delay(20000);
2588 } else {
2589 /* The PHY reset pin is active-low. */
2590 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2591 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2592 CTRL_EXT_SWDPIN(4));
2593 reg |= CTRL_EXT_SWDPIO(4);
2594
2595 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2596 delay(10);
2597
2598 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2599 delay(10);
2600
2601 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2602 delay(10);
2603 #if 0
2604 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2605 #endif
2606 }
2607 }
2608
2609 /*
2610 * wm_gmii_mediainit:
2611 *
2612 * Initialize media for use on 1000BASE-T devices.
2613 */
2614 void
2615 wm_gmii_mediainit(struct wm_softc *sc)
2616 {
2617 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2618
2619 /* We have MII. */
2620 sc->sc_flags |= WM_F_HAS_MII;
2621
2622 sc->sc_tipg = TIPG_1000T_DFLT;
2623
2624 /*
2625 * Let the chip set speed/duplex on its own based on
2626 * signals from the PHY.
2627 */
2628 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2629 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2630
2631 /* Initialize our media structures and probe the GMII. */
2632 sc->sc_mii.mii_ifp = ifp;
2633
2634 if (sc->sc_type >= WM_T_82544) {
2635 sc->sc_mii.mii_readreg = wm_gmii_i82544_readreg;
2636 sc->sc_mii.mii_writereg = wm_gmii_i82544_writereg;
2637 } else {
2638 sc->sc_mii.mii_readreg = wm_gmii_i82543_readreg;
2639 sc->sc_mii.mii_writereg = wm_gmii_i82543_writereg;
2640 }
2641 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2642
2643 wm_gmii_reset(sc);
2644
2645 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2646 wm_gmii_mediastatus);
2647
2648 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2649 MII_OFFSET_ANY, 0);
2650 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2651 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2652 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2653 } else
2654 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2655 }
2656
2657 /*
2658 * wm_gmii_mediastatus: [ifmedia interface function]
2659 *
2660 * Get the current interface media status on a 1000BASE-T device.
2661 */
2662 void
2663 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2664 {
2665 struct wm_softc *sc = ifp->if_softc;
2666
2667 mii_pollstat(&sc->sc_mii);
2668 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2669 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2670 }
2671
2672 /*
2673 * wm_gmii_mediachange: [ifmedia interface function]
2674 *
2675 * Set hardware to newly-selected media on a 1000BASE-T device.
2676 */
2677 int
2678 wm_gmii_mediachange(struct ifnet *ifp)
2679 {
2680 struct wm_softc *sc = ifp->if_softc;
2681
2682 if (ifp->if_flags & IFF_UP)
2683 mii_mediachg(&sc->sc_mii);
2684 return (0);
2685 }
2686
2687 #define MDI_IO CTRL_SWDPIN(2)
2688 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2689 #define MDI_CLK CTRL_SWDPIN(3)
2690
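/*
 * On the i82543 the MII management interface is bit-banged through
 * software-definable pins 2 (MDIO) and 3 (MDC) of the CTRL register.
 * The helpers below clock out IEEE 802.3 clause 22 frames: a 32-bit
 * preamble of ones, then start/opcode/phy/reg fields, then 16 data
 * bits, with a turnaround driven by the PHY on reads.
 */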
2691 static void
2692 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2693 {
2694 uint32_t i, v;
2695
2696 v = CSR_READ(sc, WMREG_CTRL);
2697 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2698 v |= MDI_DIR | CTRL_SWDPIO(3);
2699
2700 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2701 if (data & i)
2702 v |= MDI_IO;
2703 else
2704 v &= ~MDI_IO;
2705 CSR_WRITE(sc, WMREG_CTRL, v);
2706 delay(10);
2707 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2708 delay(10);
2709 CSR_WRITE(sc, WMREG_CTRL, v);
2710 delay(10);
2711 }
2712 }
2713
2714 static uint32_t
2715 i82543_mii_recvbits(struct wm_softc *sc)
2716 {
2717 uint32_t v, i, data = 0;
2718
2719 v = CSR_READ(sc, WMREG_CTRL);
2720 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2721 v |= CTRL_SWDPIO(3);
2722
2723 CSR_WRITE(sc, WMREG_CTRL, v);
2724 delay(10);
2725 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2726 delay(10);
2727 CSR_WRITE(sc, WMREG_CTRL, v);
2728 delay(10);
2729
2730 for (i = 0; i < 16; i++) {
2731 data <<= 1;
2732 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2733 delay(10);
2734 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2735 data |= 1;
2736 CSR_WRITE(sc, WMREG_CTRL, v);
2737 delay(10);
2738 }
2739
2740 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2741 delay(10);
2742 CSR_WRITE(sc, WMREG_CTRL, v);
2743 delay(10);
2744
2745 return (data);
2746 }
2747
2748 #undef MDI_IO
2749 #undef MDI_DIR
2750 #undef MDI_CLK
2751
2752 /*
2753 * wm_gmii_i82543_readreg: [mii interface function]
2754 *
2755 * Read a PHY register on the GMII (i82543 version).
2756 */
2757 int
2758 wm_gmii_i82543_readreg(struct device *self, int phy, int reg)
2759 {
2760 struct wm_softc *sc = (void *) self;
2761 int rv;
2762
2763 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2764 i82543_mii_sendbits(sc, reg | (phy << 5) |
2765 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2766 rv = i82543_mii_recvbits(sc) & 0xffff;
2767
2768 DPRINTF(WM_DEBUG_GMII,
2769 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2770 sc->sc_dev.dv_xname, phy, reg, rv));
2771
2772 return (rv);
2773 }
2774
2775 /*
2776 * wm_gmii_i82543_writereg: [mii interface function]
2777 *
2778 * Write a PHY register on the GMII (i82543 version).
2779 */
2780 void
2781 wm_gmii_i82543_writereg(struct device *self, int phy, int reg, int val)
2782 {
2783 struct wm_softc *sc = (void *) self;
2784
2785 i82543_mii_sendbits(sc, 0xffffffffU, 32);
2786 i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2787 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2788 (MII_COMMAND_START << 30), 32);
2789 }
2790
2791 /*
2792 * wm_gmii_i82544_readreg: [mii interface function]
2793 *
2794 * Read a PHY register on the GMII.
2795 */
2796 int
2797 wm_gmii_i82544_readreg(struct device *self, int phy, int reg)
2798 {
2799 struct wm_softc *sc = (void *) self;
2800 uint32_t mdic;
2801 int i, rv;
2802
2803 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2804 MDIC_REGADD(reg));
2805
2806 for (i = 0; i < 100; i++) {
2807 mdic = CSR_READ(sc, WMREG_MDIC);
2808 if (mdic & MDIC_READY)
2809 break;
2810 delay(10);
2811 }
2812
2813 if ((mdic & MDIC_READY) == 0) {
2814 printf("%s: MDIC read timed out: phy %d reg %d\n",
2815 sc->sc_dev.dv_xname, phy, reg);
2816 rv = 0;
2817 } else if (mdic & MDIC_E) {
2818 #if 0 /* This is normal if no PHY is present. */
2819 printf("%s: MDIC read error: phy %d reg %d\n",
2820 sc->sc_dev.dv_xname, phy, reg);
2821 #endif
2822 rv = 0;
2823 } else {
2824 rv = MDIC_DATA(mdic);
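		/*
		 * All-ones data usually means nothing drove MDIO
		 * (e.g. no PHY at this address), so treat it as an
		 * absent register rather than a real value.
		 */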
2825 if (rv == 0xffff)
2826 rv = 0;
2827 }
2828
2829 return (rv);
2830 }
2831
2832 /*
2833 * wm_gmii_i82544_writereg: [mii interface function]
2834 *
2835 * Write a PHY register on the GMII.
2836 */
2837 void
2838 wm_gmii_i82544_writereg(struct device *self, int phy, int reg, int val)
2839 {
2840 struct wm_softc *sc = (void *) self;
2841 uint32_t mdic;
2842 int i;
2843
2844 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2845 MDIC_REGADD(reg) | MDIC_DATA(val));
2846
2847 for (i = 0; i < 100; i++) {
2848 mdic = CSR_READ(sc, WMREG_MDIC);
2849 if (mdic & MDIC_READY)
2850 break;
2851 delay(10);
2852 }
2853
2854 if ((mdic & MDIC_READY) == 0)
2855 printf("%s: MDIC write timed out: phy %d reg %d\n",
2856 sc->sc_dev.dv_xname, phy, reg);
2857 else if (mdic & MDIC_E)
2858 printf("%s: MDIC write error: phy %d reg %d\n",
2859 sc->sc_dev.dv_xname, phy, reg);
2860 }
2861
2862 /*
2863 * wm_gmii_statchg: [mii interface function]
2864 *
2865 * Callback from MII layer when media changes.
2866 */
2867 void
2868 wm_gmii_statchg(struct device *self)
2869 {
2870 struct wm_softc *sc = (void *) self;
2871
2872 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2873
2874 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2875 DPRINTF(WM_DEBUG_LINK,
2876 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2877 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2878 } else {
2879 DPRINTF(WM_DEBUG_LINK,
2880 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2881 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2882 }
2883
2884 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2885 }
2886