if_wm.c revision 1.8 1 /* $NetBSD: if_wm.c,v 1.8 2002/05/09 00:41:06 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
40 * and i82544 (``Cordova'') Gigabit Ethernet chips.
41 *
42 * TODO (in order of importance):
43 *
44 * - Fix hw VLAN assist.
45 *
46 * - Make GMII work on the Livengood.
47 *
48 * - Fix out-bound IP header checksums.
49 *
50 * - Fix UDP checksums.
51 *
52 * - Jumbo frames -- requires changes to network stack due to
53 * lame buffer length handling on chip.
54 *
55 * ...and, of course, performance tuning.
56 */
57
58 #include "bpfilter.h"
59
60 #include <sys/param.h>
61 #include <sys/systm.h>
62 #include <sys/callout.h>
63 #include <sys/mbuf.h>
64 #include <sys/malloc.h>
65 #include <sys/kernel.h>
66 #include <sys/socket.h>
67 #include <sys/ioctl.h>
68 #include <sys/errno.h>
69 #include <sys/device.h>
70 #include <sys/queue.h>
71
72 #include <uvm/uvm_extern.h> /* for PAGE_SIZE */
73
74 #include <net/if.h>
75 #include <net/if_dl.h>
76 #include <net/if_media.h>
77 #include <net/if_ether.h>
78
79 #if NBPFILTER > 0
80 #include <net/bpf.h>
81 #endif
82
83 #include <netinet/in.h> /* XXX for struct ip */
84 #include <netinet/in_systm.h> /* XXX for struct ip */
85 #include <netinet/ip.h> /* XXX for struct ip */
86
87 #include <machine/bus.h>
88 #include <machine/intr.h>
89 #include <machine/endian.h>
90
91 #include <dev/mii/mii.h>
92 #include <dev/mii/miivar.h>
93 #include <dev/mii/mii_bitbang.h>
94
95 #include <dev/pci/pcireg.h>
96 #include <dev/pci/pcivar.h>
97 #include <dev/pci/pcidevs.h>
98
99 #include <dev/pci/if_wmreg.h>
100
#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01	/* link state changes */
#define	WM_DEBUG_TX		0x02	/* transmit path */
#define	WM_DEBUG_RX		0x04	/* receive path */
#define	WM_DEBUG_GMII		0x08	/* GMII/MII register access */
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

/* Print y (a parenthesized printf argument list) if any bit of x is set. */
#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
112
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16	/* max DMA segments per packet */
#define	WM_IFQUEUELEN		256	/* if_snd queue depth */
#define	WM_TXQUEUELEN		32	/* in-flight Tx jobs (power of 2) */
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256	/* hw Tx ring size (power of 2) */
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)

/*
 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
 * long as you're transmitting, you don't have to take an interrupt at
 * all.  However, we force an interrupt to happen every N + 1 packets
 * in order to kick us in a reasonable amount of time when we run out
 * of descriptors.
 */
#define	WM_TXINTR_MASK		7	/* force a Tx interrupt every 8 packets */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128	/* hw Rx ring size (power of 2) */
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
148
/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.  The hardware Tx ring lives here.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.  The hardware Rx ring lives here.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};
165
166 #define WM_CDOFF(x) offsetof(struct wm_control_data, x)
167 #define WM_CDTXOFF(x) WM_CDOFF(wcd_txdescs[(x)])
168 #define WM_CDRXOFF(x) WM_CDOFF(wcd_rxdescs[(x)])
169
/*
 * Software state for transmit jobs.  One job may span several hardware
 * descriptors (up to WM_NTXSEGS segments).
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};
180
/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};
190
/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures (the DMA'd Tx/Rx descriptor rings).
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last Tx int */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;		/* nonzero: discarding current Rx packet
					   (presumably set by rx path -- body
					   not visible in this chunk) */
	int sc_rxlen;			/* bytes accumulated on current Rx chain
					   (reset by WM_RXCHAIN_RESET) */
	struct mbuf *sc_rxhead;		/* head of Rx mbuf chain being built */
	struct mbuf *sc_rxtail;		/* tail of that chain */
	struct mbuf **sc_rxtailp;	/* points at tail's m_next (or at
					   sc_rxhead when chain is empty) */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};
289
/*
 * Reset the Rx mbuf chain accumulator: empty chain, tail pointer aimed
 * back at sc_rxhead, accumulated length zeroed.
 */
#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

/*
 * Append mbuf m to the Rx chain and advance the tail pointer to m's
 * m_next so the next link lands after it.
 */
#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
302
/* sc_type: chip generations, in increasing order (code compares with <, >=) */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

/* Bump an event counter; compiles away when counters are disabled. */
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif
317
/* 32-bit chip register (CSR) access. */
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

/* Bus (DMA) address of Tx/Rx descriptor x within the control-data clump. */
#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

/*
 * Sync n Tx descriptors starting at index x, handling wrap-around of
 * the ring by splitting the sync into (at most) two contiguous pieces.
 */
#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

/* Sync a single Rx descriptor (Rx syncs are always one at a time). */
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)
352
/*
 * (Re)initialize Rx descriptor x to point at its mbuf's buffer, sync it
 * to the chip, and advance the hardware RDT register to hand it over.
 */
#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
390
391 void wm_start(struct ifnet *);
392 void wm_watchdog(struct ifnet *);
393 int wm_ioctl(struct ifnet *, u_long, caddr_t);
394 int wm_init(struct ifnet *);
395 void wm_stop(struct ifnet *, int);
396
397 void wm_shutdown(void *);
398
399 void wm_reset(struct wm_softc *);
400 void wm_rxdrain(struct wm_softc *);
401 int wm_add_rxbuf(struct wm_softc *, int);
402 void wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
403 void wm_tick(void *);
404
405 void wm_set_filter(struct wm_softc *);
406
407 int wm_intr(void *);
408 void wm_txintr(struct wm_softc *);
409 void wm_rxintr(struct wm_softc *);
410 void wm_linkintr(struct wm_softc *, uint32_t);
411
412 void wm_tbi_mediainit(struct wm_softc *);
413 int wm_tbi_mediachange(struct ifnet *);
414 void wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
415
416 void wm_tbi_set_linkled(struct wm_softc *);
417 void wm_tbi_check_link(struct wm_softc *);
418
419 void wm_gmii_reset(struct wm_softc *);
420
421 int wm_gmii_livengood_readreg(struct device *, int, int);
422 void wm_gmii_livengood_writereg(struct device *, int, int, int);
423
424 int wm_gmii_cordova_readreg(struct device *, int, int);
425 void wm_gmii_cordova_writereg(struct device *, int, int, int);
426
427 void wm_gmii_statchg(struct device *);
428
429 void wm_gmii_mediainit(struct wm_softc *);
430 int wm_gmii_mediachange(struct ifnet *);
431 void wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
432
433 int wm_match(struct device *, struct cfdata *, void *);
434 void wm_attach(struct device *, struct device *, void *);
435
/*
 * Tunable: presumably controls copying small Rx packets into a fresh
 * mbuf instead of handing up the cluster -- the consumer is in the Rx
 * path, which is not visible in this chunk; verify before relying on it.
 */
int	wm_copy_small = 0;

/* autoconf(9) attachment glue. */
struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};
441
/*
 * Devices supported by this driver.  The table is terminated by an
 * entry with a NULL wmp_name (see wm_lookup()).
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;	/* PCI vendor ID */
	pci_product_id_t	wmp_product;	/* PCI product ID */
	const char		*wmp_name;	/* device description */
	int			wmp_type;	/* WM_T_* chip type */
	int			wmp_flags;	/* WMP_F_* media expectations */
#define	WMP_F_1000X		0x01		/* expects 1000BASE-X (TBI) */
#define	WMP_F_1000T		0x02		/* expects 1000BASE-T (GMII) */
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};
490
#ifdef WM_EVENT_COUNTERS
/*
 * Names for the per-segment-count Tx event counters; entry i counts
 * packets that used i+1 DMA segments.  Must track WM_NTXSEGS.
 */
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */
514
515 static const struct wm_product *
516 wm_lookup(const struct pci_attach_args *pa)
517 {
518 const struct wm_product *wmp;
519
520 for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
521 if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
522 PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
523 return (wmp);
524 }
525 return (NULL);
526 }
527
528 int
529 wm_match(struct device *parent, struct cfdata *cf, void *aux)
530 {
531 struct pci_attach_args *pa = aux;
532
533 if (wm_lookup(pa) != NULL)
534 return (1);
535
536 return (0);
537 }
538
/*
 * wm_attach: [autoconf attach function]
 *
 *	Attach the device: identify the chip, map its registers, wake it
 *	from power-save, hook up the interrupt, allocate the DMA'd
 *	descriptor rings and per-packet DMA maps, read the MAC address
 *	and configuration from the EEPROM, initialize TBI or GMII media,
 *	and finally attach the network interface.
 *
 *	On failure, resources allocated so far are released via the
 *	fall-through fail_* labels at the bottom (in reverse order).
 */
void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		/* Can't happen: wm_match() already found this device. */
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		/* PCI revision distinguishes Wiseman 2.0 from 2.1. */
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	/* EEPROM words are little-endian byte pairs. */
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		/* Cordova keeps software-definable pin config in SWDPIN. */
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.  Warn if the chip's strapping
	 * disagrees with what the product table expects.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
930
931 /*
932 * wm_shutdown:
933 *
934 * Make sure the interface is stopped at reboot time.
935 */
936 void
937 wm_shutdown(void *arg)
938 {
939 struct wm_softc *sc = arg;
940
941 wm_stop(&sc->sc_ethercom.ec_if, 1);
942 }
943
/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.  Computes the IPCS/TUCS context words from the
 *	packet's csum_flags, and emits a context descriptor into the Tx
 *	ring only when they differ from the cached context; otherwise the
 *	chip's current context is reused.  Returns 0 on success, EINVAL
 *	if the headers are not contiguous in the first mbuf.
 *
 *	NOTE(review): the file's TODO list (top of file) says out-bound
 *	IP header checksums and UDP checksums are broken in this
 *	revision -- treat the offload results with suspicion.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX Require the IP header to be in the first mbuf; no m_pullup. */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;		/* IP header length in bytes */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		/* start / checksum-field offset / end of IP header */
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;			/* now the offset of the L4 header */

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		/* csum_data is the checksum field offset within the L4 hdr */
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		/* 0xffffffff/0xffffffff is the never-initialized sentinel. */
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		/* The context descriptor consumed a ring slot. */
		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
		sc->sc_txwin++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
1033
1034 /*
1035 * wm_start: [ifnet interface function]
1036 *
1037 * Start packet transmission on the interface.
1038 */
1039 void
1040 wm_start(struct ifnet *ifp)
1041 {
1042 struct wm_softc *sc = ifp->if_softc;
1043 struct mbuf *m0/*, *m*/;
1044 struct wm_txsoft *txs;
1045 bus_dmamap_t dmamap;
1046 int error, nexttx, lasttx, ofree, seg;
1047 uint32_t cksumcmd, cksumfields;
1048
1049 if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
1050 return;
1051
1052 /*
1053 * Remember the previous number of free descriptors.
1054 */
1055 ofree = sc->sc_txfree;
1056
1057 /*
1058 * Loop through the send queue, setting up transmit descriptors
1059 * until we drain the queue, or use up all available transmit
1060 * descriptors.
1061 */
1062 for (;;) {
1063 /* Grab a packet off the queue. */
1064 IFQ_POLL(&ifp->if_snd, m0);
1065 if (m0 == NULL)
1066 break;
1067
1068 DPRINTF(WM_DEBUG_TX,
1069 ("%s: TX: have packet to transmit: %p\n",
1070 sc->sc_dev.dv_xname, m0));
1071
1072 /* Get a work queue entry. */
1073 if (sc->sc_txsfree == 0) {
1074 DPRINTF(WM_DEBUG_TX,
1075 ("%s: TX: no free job descriptors\n",
1076 sc->sc_dev.dv_xname));
1077 WM_EVCNT_INCR(&sc->sc_ev_txsstall);
1078 break;
1079 }
1080
1081 txs = &sc->sc_txsoft[sc->sc_txsnext];
1082 dmamap = txs->txs_dmamap;
1083
1084 /*
1085 * Load the DMA map. If this fails, the packet either
1086 * didn't fit in the allotted number of segments, or we
1087 * were short on resources. For the too-many-segments
1088 * case, we simply report an error and drop the packet,
1089 * since we can't sanely copy a jumbo packet to a single
1090 * buffer.
1091 */
1092 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
1093 BUS_DMA_WRITE|BUS_DMA_NOWAIT);
1094 if (error) {
1095 if (error == EFBIG) {
1096 WM_EVCNT_INCR(&sc->sc_ev_txdrop);
1097 printf("%s: Tx packet consumes too many "
1098 "DMA segments, dropping...\n",
1099 sc->sc_dev.dv_xname);
1100 IFQ_DEQUEUE(&ifp->if_snd, m0);
1101 m_freem(m0);
1102 continue;
1103 }
1104 /*
1105 * Short on resources, just stop for now.
1106 */
1107 DPRINTF(WM_DEBUG_TX,
1108 ("%s: TX: dmamap load failed: %d\n",
1109 sc->sc_dev.dv_xname, error));
1110 break;
1111 }
1112
1113 /*
1114 * Ensure we have enough descriptors free to describe
1115 * the packet. Note, we always reserve one descriptor
1116 * at the end of the ring due to the semantics of the
1117 * TDT register, plus one more in the event we need
1118 * to re-load checksum offload context.
1119 */
1120 if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
1121 /*
1122 * Not enough free descriptors to transmit this
1123 * packet. We haven't committed anything yet,
1124 * so just unload the DMA map, put the packet
1125 * pack on the queue, and punt. Notify the upper
1126 * layer that there are no more slots left.
1127 */
1128 DPRINTF(WM_DEBUG_TX,
1129 ("%s: TX: need %d descriptors, have %d\n",
1130 sc->sc_dev.dv_xname, dmamap->dm_nsegs,
1131 sc->sc_txfree - 1));
1132 ifp->if_flags |= IFF_OACTIVE;
1133 bus_dmamap_unload(sc->sc_dmat, dmamap);
1134 WM_EVCNT_INCR(&sc->sc_ev_txdstall);
1135 break;
1136 }
1137
1138 IFQ_DEQUEUE(&ifp->if_snd, m0);
1139
1140 /*
1141 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
1142 */
1143
1144 /* Sync the DMA map. */
1145 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
1146 BUS_DMASYNC_PREWRITE);
1147
1148 DPRINTF(WM_DEBUG_TX,
1149 ("%s: TX: packet has %d DMA segments\n",
1150 sc->sc_dev.dv_xname, dmamap->dm_nsegs));
1151
1152 WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
1153
1154 /*
1155 * Store a pointer to the packet so that we can free it
1156 * later.
1157 *
1158 * Initially, we consider the number of descriptors the
1159 * packet uses the number of DMA segments. This may be
1160 * incremented by 1 if we do checksum offload (a descriptor
1161 * is used to set the checksum context).
1162 */
1163 txs->txs_mbuf = m0;
1164 txs->txs_firstdesc = sc->sc_txnext;
1165 txs->txs_ndesc = dmamap->dm_nsegs;
1166
1167 /*
1168 * Set up checksum offload parameters for
1169 * this packet.
1170 */
1171 if (m0->m_pkthdr.csum_flags &
1172 (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
1173 if (wm_tx_cksum(sc, txs, &cksumcmd,
1174 &cksumfields) != 0) {
1175 /* Error message already displayed. */
1176 m_freem(m0);
1177 bus_dmamap_unload(sc->sc_dmat, dmamap);
1178 txs->txs_mbuf = NULL;
1179 continue;
1180 }
1181 } else {
1182 cksumcmd = 0;
1183 cksumfields = 0;
1184 }
1185
1186 cksumcmd |= htole32(WTX_CMD_IDE);
1187
1188 /*
1189 * Initialize the transmit descriptor.
1190 */
1191 for (nexttx = sc->sc_txnext, seg = 0;
1192 seg < dmamap->dm_nsegs;
1193 seg++, nexttx = WM_NEXTTX(nexttx)) {
1194 /*
1195 * Note: we currently only use 32-bit DMA
1196 * addresses.
1197 */
1198 sc->sc_txdescs[nexttx].wtx_addr.wa_low =
1199 htole32(dmamap->dm_segs[seg].ds_addr);
1200 sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
1201 htole32(dmamap->dm_segs[seg].ds_len);
1202 sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
1203 cksumfields;
1204 lasttx = nexttx;
1205
1206 sc->sc_txwin++;
1207
1208 DPRINTF(WM_DEBUG_TX,
1209 ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
1210 sc->sc_dev.dv_xname, nexttx,
1211 (uint32_t) dmamap->dm_segs[seg].ds_addr,
1212 (uint32_t) dmamap->dm_segs[seg].ds_len));
1213 }
1214
1215 /*
1216 * Set up the command byte on the last descriptor of
1217 * the packet. If we're in the interrupt delay window,
1218 * delay the interrupt.
1219 */
1220 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1221 htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
1222 if (sc->sc_txwin >= (WM_NTXDESC * 2 / 3)) {
1223 WM_EVCNT_INCR(&sc->sc_ev_txforceintr);
1224 sc->sc_txdescs[lasttx].wtx_cmdlen &=
1225 htole32(~WTX_CMD_IDE);
1226 sc->sc_txwin = 0;
1227 }
1228
1229 #if 0 /* XXXJRT */
1230 /*
1231 * If VLANs are enabled and the packet has a VLAN tag, set
1232 * up the descriptor to encapsulate the packet for us.
1233 *
1234 * This is only valid on the last descriptor of the packet.
1235 */
1236 if (sc->sc_ethercom.ec_nvlans != 0 &&
1237 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
1238 sc->sc_txdescs[lasttx].wtx_cmdlen |=
1239 htole32(WTX_CMD_VLE);
1240 sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
1241 = htole16(*mtod(m, int *) & 0xffff);
1242 }
1243 #endif /* XXXJRT */
1244
1245 txs->txs_lastdesc = lasttx;
1246
1247 DPRINTF(WM_DEBUG_TX,
1248 ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
1249 lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));
1250
1251 /* Sync the descriptors we're using. */
1252 WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
1253 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1254
1255 /* Give the packet to the chip. */
1256 CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
1257
1258 DPRINTF(WM_DEBUG_TX,
1259 ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));
1260
1261 DPRINTF(WM_DEBUG_TX,
1262 ("%s: TX: finished transmitting packet, job %d\n",
1263 sc->sc_dev.dv_xname, sc->sc_txsnext));
1264
1265 /* Advance the tx pointer. */
1266 sc->sc_txfree -= txs->txs_ndesc;
1267 sc->sc_txnext = nexttx;
1268
1269 sc->sc_txsfree--;
1270 sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);
1271
1272 #if NBPFILTER > 0
1273 /* Pass the packet to any BPF listeners. */
1274 if (ifp->if_bpf)
1275 bpf_mtap(ifp->if_bpf, m0);
1276 #endif /* NBPFILTER > 0 */
1277 }
1278
1279 if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
1280 /* No more slots; notify upper layer. */
1281 ifp->if_flags |= IFF_OACTIVE;
1282 }
1283
1284 if (sc->sc_txfree != ofree) {
1285 /* Set a watchdog timer in case the chip flakes out. */
1286 ifp->if_timer = 5;
1287 }
1288 }
1289
1290 /*
1291 * wm_watchdog: [ifnet interface function]
1292 *
1293 * Watchdog timer handler.
1294 */
1295 void
1296 wm_watchdog(struct ifnet *ifp)
1297 {
1298 struct wm_softc *sc = ifp->if_softc;
1299
1300 /*
1301 * Since we're using delayed interrupts, sweep up
1302 * before we report an error.
1303 */
1304 wm_txintr(sc);
1305
1306 if (sc->sc_txfree != WM_NTXDESC) {
1307 printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
1308 sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
1309 sc->sc_txnext);
1310 ifp->if_oerrors++;
1311
1312 /* Reset the interface. */
1313 (void) wm_init(ifp);
1314 }
1315
1316 /* Try to get more packets going. */
1317 wm_start(ifp);
1318 }
1319
1320 /*
1321 * wm_ioctl: [ifnet interface function]
1322 *
1323 * Handle control requests from the operator.
1324 */
1325 int
1326 wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1327 {
1328 struct wm_softc *sc = ifp->if_softc;
1329 struct ifreq *ifr = (struct ifreq *) data;
1330 int s, error;
1331
1332 s = splnet();
1333
1334 switch (cmd) {
1335 case SIOCSIFMEDIA:
1336 case SIOCGIFMEDIA:
1337 error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
1338 break;
1339
1340 default:
1341 error = ether_ioctl(ifp, cmd, data);
1342 if (error == ENETRESET) {
1343 /*
1344 * Multicast list has changed; set the hardware filter
1345 * accordingly.
1346 */
1347 wm_set_filter(sc);
1348 error = 0;
1349 }
1350 break;
1351 }
1352
1353 /* Try to get more packets going. */
1354 wm_start(ifp);
1355
1356 splx(s);
1357 return (error);
1358 }
1359
1360 /*
1361 * wm_intr:
1362 *
1363 * Interrupt service routine.
1364 */
1365 int
1366 wm_intr(void *arg)
1367 {
1368 struct wm_softc *sc = arg;
1369 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1370 uint32_t icr;
1371 int wantinit, handled = 0;
1372
1373 for (wantinit = 0; wantinit == 0;) {
1374 icr = CSR_READ(sc, WMREG_ICR);
1375 if ((icr & sc->sc_icr) == 0)
1376 break;
1377
1378 handled = 1;
1379
1380 if (icr & (ICR_RXDMT0|ICR_RXT0)) {
1381 DPRINTF(WM_DEBUG_RX,
1382 ("%s: RX: got Rx intr 0x%08x\n",
1383 sc->sc_dev.dv_xname,
1384 icr & (ICR_RXDMT0|ICR_RXT0)));
1385 WM_EVCNT_INCR(&sc->sc_ev_rxintr);
1386 wm_rxintr(sc);
1387 }
1388
1389 if (icr & (ICR_TXDW|ICR_TXQE)) {
1390 DPRINTF(WM_DEBUG_TX,
1391 ("%s: TX: got TDXW|TXQE interrupt\n",
1392 sc->sc_dev.dv_xname));
1393 #ifdef WM_EVENT_COUNTERS
1394 if (icr & ICR_TXDW)
1395 WM_EVCNT_INCR(&sc->sc_ev_txdw);
1396 else if (icr & ICR_TXQE)
1397 WM_EVCNT_INCR(&sc->sc_ev_txqe);
1398 #endif
1399 wm_txintr(sc);
1400 }
1401
1402 if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
1403 WM_EVCNT_INCR(&sc->sc_ev_linkintr);
1404 wm_linkintr(sc, icr);
1405 }
1406
1407 if (icr & ICR_RXO) {
1408 printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
1409 wantinit = 1;
1410 }
1411 }
1412
1413 if (handled) {
1414 if (wantinit)
1415 wm_init(ifp);
1416
1417 /* Try to get more packets going. */
1418 wm_start(ifp);
1419 }
1420
1421 return (handled);
1422 }
1423
1424 /*
1425 * wm_txintr:
1426 *
1427 * Helper; handle transmit interrupts.
1428 */
1429 void
1430 wm_txintr(struct wm_softc *sc)
1431 {
1432 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1433 struct wm_txsoft *txs;
1434 uint8_t status;
1435 int i;
1436
1437 ifp->if_flags &= ~IFF_OACTIVE;
1438
1439 /*
1440 * Go through the Tx list and free mbufs for those
1441 * frams which have been transmitted.
1442 */
1443 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1444 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1445 txs = &sc->sc_txsoft[i];
1446
1447 DPRINTF(WM_DEBUG_TX,
1448 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1449
1450 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1451 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1452
1453 status = le32toh(sc->sc_txdescs[
1454 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1455 if ((status & WTX_ST_DD) == 0)
1456 break;
1457
1458 DPRINTF(WM_DEBUG_TX,
1459 ("%s: TX: job %d done: descs %d..%d\n",
1460 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1461 txs->txs_lastdesc));
1462
1463 /*
1464 * XXX We should probably be using the statistics
1465 * XXX registers, but I don't know if they exist
1466 * XXX on chips before the Cordova.
1467 */
1468
1469 #ifdef WM_EVENT_COUNTERS
1470 if (status & WTX_ST_TU)
1471 WM_EVCNT_INCR(&sc->sc_ev_tu);
1472 #endif /* WM_EVENT_COUNTERS */
1473
1474 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1475 ifp->if_oerrors++;
1476 if (status & WTX_ST_LC)
1477 printf("%s: late collision\n",
1478 sc->sc_dev.dv_xname);
1479 else if (status & WTX_ST_EC) {
1480 ifp->if_collisions += 16;
1481 printf("%s: excessive collisions\n",
1482 sc->sc_dev.dv_xname);
1483 }
1484 } else
1485 ifp->if_opackets++;
1486
1487 sc->sc_txfree += txs->txs_ndesc;
1488 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1489 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1490 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1491 m_freem(txs->txs_mbuf);
1492 txs->txs_mbuf = NULL;
1493 }
1494
1495 /* Update the dirty transmit buffer pointer. */
1496 sc->sc_txsdirty = i;
1497 DPRINTF(WM_DEBUG_TX,
1498 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1499
1500 /*
1501 * If there are no more pending transmissions, cancel the watchdog
1502 * timer.
1503 */
1504 if (sc->sc_txsfree == WM_TXQUEUELEN)
1505 ifp->if_timer = 0;
1506 if (sc->sc_txfree == WM_NTXDESC)
1507 sc->sc_txwin = 0;
1508 }
1509
1510 /*
1511 * wm_rxintr:
1512 *
1513 * Helper; handle receive interrupts.
1514 */
1515 void
1516 wm_rxintr(struct wm_softc *sc)
1517 {
1518 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1519 struct wm_rxsoft *rxs;
1520 struct mbuf *m;
1521 int i, len;
1522 uint8_t status, errors;
1523
1524 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1525 rxs = &sc->sc_rxsoft[i];
1526
1527 DPRINTF(WM_DEBUG_RX,
1528 ("%s: RX: checking descriptor %d\n",
1529 sc->sc_dev.dv_xname, i));
1530
1531 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1532
1533 status = sc->sc_rxdescs[i].wrx_status;
1534 errors = sc->sc_rxdescs[i].wrx_errors;
1535 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1536
1537 if ((status & WRX_ST_DD) == 0) {
1538 /*
1539 * We have processed all of the receive descriptors.
1540 */
1541 break;
1542 }
1543
1544 if (__predict_false(sc->sc_rxdiscard)) {
1545 DPRINTF(WM_DEBUG_RX,
1546 ("%s: RX: discarding contents of descriptor %d\n",
1547 sc->sc_dev.dv_xname, i));
1548 WM_INIT_RXDESC(sc, i);
1549 if (status & WRX_ST_EOP) {
1550 /* Reset our state. */
1551 DPRINTF(WM_DEBUG_RX,
1552 ("%s: RX: resetting rxdiscard -> 0\n",
1553 sc->sc_dev.dv_xname));
1554 sc->sc_rxdiscard = 0;
1555 }
1556 continue;
1557 }
1558
1559 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1560 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1561
1562 m = rxs->rxs_mbuf;
1563
1564 /*
1565 * Add a new receive buffer to the ring.
1566 */
1567 if (wm_add_rxbuf(sc, i) != 0) {
1568 /*
1569 * Failed, throw away what we've done so
1570 * far, and discard the rest of the packet.
1571 */
1572 ifp->if_ierrors++;
1573 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1574 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1575 WM_INIT_RXDESC(sc, i);
1576 if ((status & WRX_ST_EOP) == 0)
1577 sc->sc_rxdiscard = 1;
1578 if (sc->sc_rxhead != NULL)
1579 m_freem(sc->sc_rxhead);
1580 WM_RXCHAIN_RESET(sc);
1581 DPRINTF(WM_DEBUG_RX,
1582 ("%s: RX: Rx buffer allocation failed, "
1583 "dropping packet%s\n", sc->sc_dev.dv_xname,
1584 sc->sc_rxdiscard ? " (discard)" : ""));
1585 continue;
1586 }
1587
1588 WM_RXCHAIN_LINK(sc, m);
1589
1590 m->m_len = len;
1591
1592 DPRINTF(WM_DEBUG_RX,
1593 ("%s: RX: buffer at %p len %d\n",
1594 sc->sc_dev.dv_xname, m->m_data, len));
1595
1596 /*
1597 * If this is not the end of the packet, keep
1598 * looking.
1599 */
1600 if ((status & WRX_ST_EOP) == 0) {
1601 sc->sc_rxlen += len;
1602 DPRINTF(WM_DEBUG_RX,
1603 ("%s: RX: not yet EOP, rxlen -> %d\n",
1604 sc->sc_dev.dv_xname, sc->sc_rxlen));
1605 continue;
1606 }
1607
1608 /*
1609 * Okay, we have the entire packet now...
1610 */
1611 *sc->sc_rxtailp = NULL;
1612 m = sc->sc_rxhead;
1613 len += sc->sc_rxlen;
1614
1615 WM_RXCHAIN_RESET(sc);
1616
1617 DPRINTF(WM_DEBUG_RX,
1618 ("%s: RX: have entire packet, len -> %d\n",
1619 sc->sc_dev.dv_xname, len));
1620
1621 /*
1622 * If an error occurred, update stats and drop the packet.
1623 */
1624 if (errors &
1625 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1626 ifp->if_ierrors++;
1627 if (errors & WRX_ER_SE)
1628 printf("%s: symbol error\n",
1629 sc->sc_dev.dv_xname);
1630 else if (errors & WRX_ER_SEQ)
1631 printf("%s: receive sequence error\n",
1632 sc->sc_dev.dv_xname);
1633 else if (errors & WRX_ER_CE)
1634 printf("%s: CRC error\n",
1635 sc->sc_dev.dv_xname);
1636 m_freem(m);
1637 continue;
1638 }
1639
1640 /*
1641 * No errors. Receive the packet.
1642 *
1643 * Note, we have configured the chip to include the
1644 * CRC with every packet.
1645 */
1646 m->m_flags |= M_HASFCS;
1647 m->m_pkthdr.rcvif = ifp;
1648 m->m_pkthdr.len = len;
1649
1650 #if 0 /* XXXJRT */
1651 /*
1652 * If VLANs are enabled, VLAN packets have been unwrapped
1653 * for us. Associate the tag with the packet.
1654 */
1655 if (sc->sc_ethercom.ec_nvlans != 0 &&
1656 (status & WRX_ST_VP) != 0) {
1657 struct mbuf *vtag;
1658
1659 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1660 if (vtag == NULL) {
1661 ifp->if_ierrors++;
1662 printf("%s: unable to allocate VLAN tag\n",
1663 sc->sc_dev.dv_xname);
1664 m_freem(m);
1665 continue;
1666 }
1667
1668 *mtod(m, int *) =
1669 le16toh(sc->sc_rxdescs[i].wrx_special);
1670 vtag->m_len = sizeof(int);
1671 }
1672 #endif /* XXXJRT */
1673
1674 /*
1675 * Set up checksum info for this packet.
1676 */
1677 if (status & WRX_ST_IPCS) {
1678 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1679 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1680 if (errors & WRX_ER_IPE)
1681 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1682 }
1683 if (status & WRX_ST_TCPCS) {
1684 /*
1685 * Note: we don't know if this was TCP or UDP,
1686 * so we just set both bits, and expect the
1687 * upper layers to deal.
1688 */
1689 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1690 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1691 if (errors & WRX_ER_TCPE)
1692 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1693 }
1694
1695 ifp->if_ipackets++;
1696
1697 #if NBPFILTER > 0
1698 /* Pass this up to any BPF listeners. */
1699 if (ifp->if_bpf)
1700 bpf_mtap(ifp->if_bpf, m);
1701 #endif /* NBPFILTER > 0 */
1702
1703 /* Pass it on. */
1704 (*ifp->if_input)(ifp, m);
1705 }
1706
1707 /* Update the receive pointer. */
1708 sc->sc_rxptr = i;
1709
1710 DPRINTF(WM_DEBUG_RX,
1711 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1712 }
1713
1714 /*
1715 * wm_linkintr:
1716 *
1717 * Helper; handle link interrupts.
1718 */
1719 void
1720 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1721 {
1722 uint32_t status;
1723
1724 /*
1725 * If we get a link status interrupt on a 1000BASE-T
1726 * device, just fall into the normal MII tick path.
1727 */
1728 if (sc->sc_flags & WM_F_HAS_MII) {
1729 if (icr & ICR_LSC) {
1730 DPRINTF(WM_DEBUG_LINK,
1731 ("%s: LINK: LSC -> mii_tick\n",
1732 sc->sc_dev.dv_xname));
1733 mii_tick(&sc->sc_mii);
1734 } else if (icr & ICR_RXSEQ) {
1735 DPRINTF(WM_DEBUG_LINK,
1736 ("%s: LINK Receive sequence error\n",
1737 sc->sc_dev.dv_xname));
1738 }
1739 return;
1740 }
1741
1742 /*
1743 * If we are now receiving /C/, check for link again in
1744 * a couple of link clock ticks.
1745 */
1746 if (icr & ICR_RXCFG) {
1747 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1748 sc->sc_dev.dv_xname));
1749 sc->sc_tbi_anstate = 2;
1750 }
1751
1752 if (icr & ICR_LSC) {
1753 status = CSR_READ(sc, WMREG_STATUS);
1754 if (status & STATUS_LU) {
1755 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1756 sc->sc_dev.dv_xname,
1757 (status & STATUS_FD) ? "FDX" : "HDX"));
1758 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1759 if (status & STATUS_FD)
1760 sc->sc_tctl |=
1761 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1762 else
1763 sc->sc_tctl |=
1764 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1765 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1766 sc->sc_tbi_linkup = 1;
1767 } else {
1768 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1769 sc->sc_dev.dv_xname));
1770 sc->sc_tbi_linkup = 0;
1771 }
1772 sc->sc_tbi_anstate = 2;
1773 wm_tbi_set_linkled(sc);
1774 } else if (icr & ICR_RXSEQ) {
1775 DPRINTF(WM_DEBUG_LINK,
1776 ("%s: LINK: Receive sequence error\n",
1777 sc->sc_dev.dv_xname));
1778 }
1779 }
1780
1781 /*
1782 * wm_tick:
1783 *
1784 * One second timer, used to check link status, sweep up
1785 * completed transmit jobs, etc.
1786 */
1787 void
1788 wm_tick(void *arg)
1789 {
1790 struct wm_softc *sc = arg;
1791 int s;
1792
1793 s = splnet();
1794
1795 if (sc->sc_flags & WM_F_HAS_MII)
1796 mii_tick(&sc->sc_mii);
1797 else
1798 wm_tbi_check_link(sc);
1799
1800 splx(s);
1801
1802 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1803 }
1804
1805 /*
1806 * wm_reset:
1807 *
1808 * Reset the i82542 chip.
1809 */
1810 void
1811 wm_reset(struct wm_softc *sc)
1812 {
1813 int i;
1814
1815 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1816 delay(10000);
1817
1818 for (i = 0; i < 1000; i++) {
1819 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1820 return;
1821 delay(20);
1822 }
1823
1824 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1825 printf("%s: WARNING: reset failed to complete\n",
1826 sc->sc_dev.dv_xname);
1827 }
1828
1829 /*
1830 * wm_init: [ifnet interface function]
1831 *
1832 * Initialize the interface. Must be called at splnet().
1833 */
1834 int
1835 wm_init(struct ifnet *ifp)
1836 {
1837 struct wm_softc *sc = ifp->if_softc;
1838 struct wm_rxsoft *rxs;
1839 int i, error = 0;
1840 uint32_t reg;
1841
1842 /* Cancel any pending I/O. */
1843 wm_stop(ifp, 0);
1844
1845 /* Reset the chip to a known state. */
1846 wm_reset(sc);
1847
1848 /* Initialize the transmit descriptor ring. */
1849 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1850 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1851 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1852 sc->sc_txfree = WM_NTXDESC;
1853 sc->sc_txnext = 0;
1854 sc->sc_txwin = 0;
1855
1856 sc->sc_txctx_ipcs = 0xffffffff;
1857 sc->sc_txctx_tucs = 0xffffffff;
1858
1859 if (sc->sc_type < WM_T_LIVENGOOD) {
1860 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1861 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1862 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1863 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1864 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1865 CSR_WRITE(sc, WMREG_OLD_TIDV, 1024);
1866 } else {
1867 CSR_WRITE(sc, WMREG_TBDAH, 0);
1868 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1869 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1870 CSR_WRITE(sc, WMREG_TDH, 0);
1871 CSR_WRITE(sc, WMREG_TDT, 0);
1872 CSR_WRITE(sc, WMREG_TIDV, 1024);
1873
1874 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1875 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1876 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1877 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1878 }
1879 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1880 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1881
1882 /* Initialize the transmit job descriptors. */
1883 for (i = 0; i < WM_TXQUEUELEN; i++)
1884 sc->sc_txsoft[i].txs_mbuf = NULL;
1885 sc->sc_txsfree = WM_TXQUEUELEN;
1886 sc->sc_txsnext = 0;
1887 sc->sc_txsdirty = 0;
1888
1889 /*
1890 * Initialize the receive descriptor and receive job
1891 * descriptor rings.
1892 */
1893 if (sc->sc_type < WM_T_LIVENGOOD) {
1894 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1895 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1896 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1897 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1898 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1899 CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);
1900
1901 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1902 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1903 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1904 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1905 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1906 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1907 } else {
1908 CSR_WRITE(sc, WMREG_RDBAH, 0);
1909 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1910 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1911 CSR_WRITE(sc, WMREG_RDH, 0);
1912 CSR_WRITE(sc, WMREG_RDT, 0);
1913 CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
1914 }
1915 for (i = 0; i < WM_NRXDESC; i++) {
1916 rxs = &sc->sc_rxsoft[i];
1917 if (rxs->rxs_mbuf == NULL) {
1918 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1919 printf("%s: unable to allocate or map rx "
1920 "buffer %d, error = %d\n",
1921 sc->sc_dev.dv_xname, i, error);
1922 /*
1923 * XXX Should attempt to run with fewer receive
1924 * XXX buffers instead of just failing.
1925 */
1926 wm_rxdrain(sc);
1927 goto out;
1928 }
1929 } else
1930 WM_INIT_RXDESC(sc, i);
1931 }
1932 sc->sc_rxptr = 0;
1933 sc->sc_rxdiscard = 0;
1934 WM_RXCHAIN_RESET(sc);
1935
1936 /*
1937 * Clear out the VLAN table -- we don't use it (yet).
1938 */
1939 CSR_WRITE(sc, WMREG_VET, 0);
1940 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1941 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1942
1943 /*
1944 * Set up flow-control parameters.
1945 *
1946 * XXX Values could probably stand some tuning.
1947 */
1948 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1949 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1950 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1951 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1952
1953 if (sc->sc_type < WM_T_LIVENGOOD) {
1954 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1955 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1956 } else {
1957 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1958 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1959 }
1960 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1961 }
1962
1963 #if 0 /* XXXJRT */
1964 /* Deal with VLAN enables. */
1965 if (sc->sc_ethercom.ec_nvlans != 0)
1966 sc->sc_ctrl |= CTRL_VME;
1967 else
1968 #endif /* XXXJRT */
1969 sc->sc_ctrl &= ~CTRL_VME;
1970
1971 /* Write the control registers. */
1972 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1973 #if 0
1974 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1975 #endif
1976
1977 /*
1978 * Set up checksum offload parameters.
1979 */
1980 reg = CSR_READ(sc, WMREG_RXCSUM);
1981 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1982 reg |= RXCSUM_IPOFL;
1983 else
1984 reg &= ~RXCSUM_IPOFL;
1985 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1986 reg |= RXCSUM_TUOFL;
1987 else
1988 reg &= ~RXCSUM_TUOFL;
1989 CSR_WRITE(sc, WMREG_RXCSUM, reg);
1990
1991 /*
1992 * Set up the interrupt registers.
1993 */
1994 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
1995 sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1996 ICR_RXO | ICR_RXT0;
1997 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
1998 sc->sc_icr |= ICR_RXCFG;
1999 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2000
2001 /* Set up the inter-packet gap. */
2002 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2003
2004 #if 0 /* XXXJRT */
2005 /* Set the VLAN ethernetype. */
2006 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2007 #endif
2008
2009 /*
2010 * Set up the transmit control register; we start out with
2011 * a collision distance suitable for FDX, but update it whe
2012 * we resolve the media type.
2013 */
2014 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2015 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2016 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2017
2018 /* Set the media. */
2019 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2020
2021 /*
2022 * Set up the receive control register; we actually program
2023 * the register when we set the receive filter. Use multicast
2024 * address offset type 0.
2025 *
2026 * Only the Cordova has the ability to strip the incoming
2027 * CRC, so we don't enable that feature.
2028 */
2029 sc->sc_mchash_type = 0;
2030 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2031 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2032
2033 /* Set the receive filter. */
2034 wm_set_filter(sc);
2035
2036 /* Start the one second link check clock. */
2037 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2038
2039 /* ...all done! */
2040 ifp->if_flags |= IFF_RUNNING;
2041 ifp->if_flags &= ~IFF_OACTIVE;
2042
2043 out:
2044 if (error)
2045 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2046 return (error);
2047 }
2048
2049 /*
2050 * wm_rxdrain:
2051 *
2052 * Drain the receive queue.
2053 */
2054 void
2055 wm_rxdrain(struct wm_softc *sc)
2056 {
2057 struct wm_rxsoft *rxs;
2058 int i;
2059
2060 for (i = 0; i < WM_NRXDESC; i++) {
2061 rxs = &sc->sc_rxsoft[i];
2062 if (rxs->rxs_mbuf != NULL) {
2063 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2064 m_freem(rxs->rxs_mbuf);
2065 rxs->rxs_mbuf = NULL;
2066 }
2067 }
2068 }
2069
2070 /*
2071 * wm_stop: [ifnet interface function]
2072 *
2073 * Stop transmission on the interface.
2074 */
2075 void
2076 wm_stop(struct ifnet *ifp, int disable)
2077 {
2078 struct wm_softc *sc = ifp->if_softc;
2079 struct wm_txsoft *txs;
2080 int i;
2081
2082 /* Stop the one second clock. */
2083 callout_stop(&sc->sc_tick_ch);
2084
2085 if (sc->sc_flags & WM_F_HAS_MII) {
2086 /* Down the MII. */
2087 mii_down(&sc->sc_mii);
2088 }
2089
2090 /* Stop the transmit and receive processes. */
2091 CSR_WRITE(sc, WMREG_TCTL, 0);
2092 CSR_WRITE(sc, WMREG_RCTL, 0);
2093
2094 /* Release any queued transmit buffers. */
2095 for (i = 0; i < WM_TXQUEUELEN; i++) {
2096 txs = &sc->sc_txsoft[i];
2097 if (txs->txs_mbuf != NULL) {
2098 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2099 m_freem(txs->txs_mbuf);
2100 txs->txs_mbuf = NULL;
2101 }
2102 }
2103
2104 if (disable)
2105 wm_rxdrain(sc);
2106
2107 /* Mark the interface as down and cancel the watchdog timer. */
2108 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2109 ifp->if_timer = 0;
2110 }
2111
2112 /*
2113 * wm_read_eeprom:
2114 *
2115 * Read data from the serial EEPROM.
2116 */
2117 void
2118 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2119 {
2120 uint32_t reg;
2121 int i, x;
2122
2123 for (i = 0; i < wordcnt; i++) {
2124 /* Send CHIP SELECT for one clock tick. */
2125 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2126 delay(2);
2127
2128 /* Shift in the READ command. */
2129 for (x = 3; x > 0; x--) {
2130 reg = EECD_CS;
2131 if (UWIRE_OPC_READ & (1 << (x - 1)))
2132 reg |= EECD_DI;
2133 CSR_WRITE(sc, WMREG_EECD, reg);
2134 delay(2);
2135 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2136 delay(2);
2137 CSR_WRITE(sc, WMREG_EECD, reg);
2138 delay(2);
2139 }
2140
2141 /* Shift in address. */
2142 for (x = 6; x > 0; x--) {
2143 reg = EECD_CS;
2144 if ((word + i) & (1 << (x - 1)))
2145 reg |= EECD_DI;
2146 CSR_WRITE(sc, WMREG_EECD, reg);
2147 delay(2);
2148 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2149 delay(2);
2150 CSR_WRITE(sc, WMREG_EECD, reg);
2151 delay(2);
2152 }
2153
2154 /* Shift out the data. */
2155 reg = EECD_CS;
2156 data[i] = 0;
2157 for (x = 16; x > 0; x--) {
2158 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2159 delay(2);
2160 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2161 data[i] |= (1 << (x - 1));
2162 CSR_WRITE(sc, WMREG_EECD, reg);
2163 delay(2);
2164 }
2165
2166 /* Clear CHIP SELECT. */
2167 CSR_WRITE(sc, WMREG_EECD, 0);
2168 }
2169 }
2170
2171 /*
2172 * wm_add_rxbuf:
2173 *
2174 * Add a receive buffer to the indiciated descriptor.
2175 */
2176 int
2177 wm_add_rxbuf(struct wm_softc *sc, int idx)
2178 {
2179 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2180 struct mbuf *m;
2181 int error;
2182
2183 MGETHDR(m, M_DONTWAIT, MT_DATA);
2184 if (m == NULL)
2185 return (ENOBUFS);
2186
2187 MCLGET(m, M_DONTWAIT);
2188 if ((m->m_flags & M_EXT) == 0) {
2189 m_freem(m);
2190 return (ENOBUFS);
2191 }
2192
2193 if (rxs->rxs_mbuf != NULL)
2194 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2195
2196 rxs->rxs_mbuf = m;
2197
2198 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2199 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2200 BUS_DMA_READ|BUS_DMA_NOWAIT);
2201 if (error) {
2202 printf("%s: unable to load rx DMA map %d, error = %d\n",
2203 sc->sc_dev.dv_xname, idx, error);
2204 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2205 }
2206
2207 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2208 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2209
2210 WM_INIT_RXDESC(sc, idx);
2211
2212 return (0);
2213 }
2214
2215 /*
2216 * wm_set_ral:
2217 *
2218 * Set an entery in the receive address list.
2219 */
2220 static void
2221 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2222 {
2223 uint32_t ral_lo, ral_hi;
2224
2225 if (enaddr != NULL) {
2226 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2227 (enaddr[3] << 24);
2228 ral_hi = enaddr[4] | (enaddr[5] << 8);
2229 ral_hi |= RAL_AV;
2230 } else {
2231 ral_lo = 0;
2232 ral_hi = 0;
2233 }
2234
2235 if (sc->sc_type >= WM_T_CORDOVA) {
2236 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2237 ral_lo);
2238 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2239 ral_hi);
2240 } else {
2241 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2242 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2243 }
2244 }
2245
2246 /*
2247 * wm_mchash:
2248 *
2249 * Compute the hash of the multicast address for the 4096-bit
2250 * multicast filter.
2251 */
2252 static uint32_t
2253 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2254 {
2255 static const int lo_shift[4] = { 4, 3, 2, 0 };
2256 static const int hi_shift[4] = { 4, 5, 6, 8 };
2257 uint32_t hash;
2258
2259 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2260 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2261
2262 return (hash & 0xfff);
2263 }
2264
2265 /*
2266 * wm_set_filter:
2267 *
2268 * Set up the receive filter.
2269 */
2270 void
2271 wm_set_filter(struct wm_softc *sc)
2272 {
2273 struct ethercom *ec = &sc->sc_ethercom;
2274 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2275 struct ether_multi *enm;
2276 struct ether_multistep step;
2277 bus_addr_t mta_reg;
2278 uint32_t hash, reg, bit;
2279 int i;
2280
2281 if (sc->sc_type >= WM_T_CORDOVA)
2282 mta_reg = WMREG_CORDOVA_MTA;
2283 else
2284 mta_reg = WMREG_MTA;
2285
2286 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2287
2288 if (ifp->if_flags & IFF_BROADCAST)
2289 sc->sc_rctl |= RCTL_BAM;
2290 if (ifp->if_flags & IFF_PROMISC) {
2291 sc->sc_rctl |= RCTL_UPE;
2292 goto allmulti;
2293 }
2294
2295 /*
2296 * Set the station address in the first RAL slot, and
2297 * clear the remaining slots.
2298 */
2299 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2300 for (i = 1; i < WM_RAL_TABSIZE; i++)
2301 wm_set_ral(sc, NULL, i);
2302
2303 /* Clear out the multicast table. */
2304 for (i = 0; i < WM_MC_TABSIZE; i++)
2305 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2306
2307 ETHER_FIRST_MULTI(step, ec, enm);
2308 while (enm != NULL) {
2309 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2310 /*
2311 * We must listen to a range of multicast addresses.
2312 * For now, just accept all multicasts, rather than
2313 * trying to set only those filter bits needed to match
2314 * the range. (At this time, the only use of address
2315 * ranges is for IP multicast routing, for which the
2316 * range is big enough to require all bits set.)
2317 */
2318 goto allmulti;
2319 }
2320
2321 hash = wm_mchash(sc, enm->enm_addrlo);
2322
2323 reg = (hash >> 5) & 0x7f;
2324 bit = hash & 0x1f;
2325
2326 hash = CSR_READ(sc, mta_reg + (reg << 2));
2327 hash |= 1U << bit;
2328
2329 /* XXX Hardware bug?? */
2330 if (sc->sc_type == WM_T_CORDOVA && (reg & 0xe) == 1) {
2331 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2332 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2333 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2334 } else
2335 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2336
2337 ETHER_NEXT_MULTI(step, enm);
2338 }
2339
2340 ifp->if_flags &= ~IFF_ALLMULTI;
2341 goto setit;
2342
2343 allmulti:
2344 ifp->if_flags |= IFF_ALLMULTI;
2345 sc->sc_rctl |= RCTL_MPE;
2346
2347 setit:
2348 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2349 }
2350
2351 /*
2352 * wm_tbi_mediainit:
2353 *
2354 * Initialize media for use on 1000BASE-X devices.
2355 */
void
wm_tbi_mediainit(struct wm_softc *sc)
{
	const char *sep = "";

	/* Select the default inter-packet gap for this chip generation. */
	if (sc->sc_type < WM_T_LIVENGOOD)
		sc->sc_tipg = TIPG_WM_DFLT;
	else
		sc->sc_tipg = TIPG_LG_DFLT;

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
	    wm_tbi_mediastatus);

	/*
	 * SWD Pins:
	 *
	 *	0 = Link LED (output)
	 *	1 = Loss Of Signal (input)
	 */
	sc->sc_ctrl |= CTRL_SWDPIO(0);
	sc->sc_ctrl &= ~CTRL_SWDPIO(1);

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

/*
 * Register one media type and print its name; `sep' makes the
 * printed list comma-separated after the first entry.
 */
#define ADD(s, m, d)							\
do {									\
	printf("%s%s", sep, s);						\
	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL);	\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	printf("%s: ", sc->sc_dev.dv_xname);
	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
	printf("\n");

#undef ADD

	/* Default to autonegotiation. */
	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2397
2398 /*
2399 * wm_tbi_mediastatus: [ifmedia interface function]
2400 *
2401 * Get the current interface media status on a 1000BASE-X device.
2402 */
2403 void
2404 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2405 {
2406 struct wm_softc *sc = ifp->if_softc;
2407
2408 ifmr->ifm_status = IFM_AVALID;
2409 ifmr->ifm_active = IFM_ETHER;
2410
2411 if (sc->sc_tbi_linkup == 0) {
2412 ifmr->ifm_active |= IFM_NONE;
2413 return;
2414 }
2415
2416 ifmr->ifm_status |= IFM_ACTIVE;
2417 ifmr->ifm_active |= IFM_1000_SX;
2418 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2419 ifmr->ifm_active |= IFM_FDX;
2420 }
2421
2422 /*
2423 * wm_tbi_mediachange: [ifmedia interface function]
2424 *
2425 * Set hardware to newly-selected media on a 1000BASE-X device.
2426 */
int
wm_tbi_mediachange(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
	uint32_t status;
	int i;

	/*
	 * Build the transmit config word from the selected media's
	 * ability bits, adding flow-control bits from the chip's
	 * current CTRL settings, and enable autonegotiation.
	 */
	sc->sc_txcw = ife->ifm_data;
	if (sc->sc_ctrl & CTRL_RFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
	if (sc->sc_ctrl & CTRL_TFCE)
		sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
	sc->sc_txcw |= TXCW_ANE;

	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
	delay(10000);

	sc->sc_tbi_anstate = 0;

	/* SWD pin 1 is Loss Of Signal (active-high): 0 means we have signal. */
	if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
		/* Have signal; wait for the link to come up. */
		for (i = 0; i < 50; i++) {
			delay(10000);
			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
				break;
		}

		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			/* Link is up; set collision distance per duplex. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			/* Link is down. */
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: set media -> link down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
	} else {
		/* No signal on the fiber; consider the link down. */
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	}

	/* Reflect the new link state on the link LED. */
	wm_tbi_set_linkled(sc);

	return (0);
}
2488
2489 /*
2490 * wm_tbi_set_linkled:
2491 *
2492 * Update the link LED on 1000BASE-X devices.
2493 */
2494 void
2495 wm_tbi_set_linkled(struct wm_softc *sc)
2496 {
2497
2498 if (sc->sc_tbi_linkup)
2499 sc->sc_ctrl |= CTRL_SWDPIN(0);
2500 else
2501 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2502
2503 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2504 }
2505
2506 /*
2507 * wm_tbi_check_link:
2508 *
2509 * Check the link on 1000BASE-X devices.
2510 */
void
wm_tbi_check_link(struct wm_softc *sc)
{
	uint32_t rxcw, ctrl, status;

	/* No autonegotiation in progress; nothing to do. */
	if (sc->sc_tbi_anstate == 0)
		return;
	else if (sc->sc_tbi_anstate > 1) {
		/* Autonegotiation countdown still running; try again later. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
		    sc->sc_tbi_anstate));
		sc->sc_tbi_anstate--;
		return;
	}

	sc->sc_tbi_anstate = 0;

	/*
	 * NOTE(review): rxcw and ctrl are read but their values are never
	 * examined below; presumably the reads are kept to latch/clear
	 * hardware state -- confirm before removing them.
	 */
	rxcw = CSR_READ(sc, WMREG_RXCW);
	ctrl = CSR_READ(sc, WMREG_CTRL);
	status = CSR_READ(sc, WMREG_STATUS);

	if ((status & STATUS_LU) == 0) {
		/* Link went down. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
		sc->sc_tbi_linkup = 0;
	} else {
		/* Link is up; set collision distance per negotiated duplex. */
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
		    (status & STATUS_FD) ? "FDX" : "HDX"));
		sc->sc_tctl &= ~TCTL_COLD(0x3ff);
		if (status & STATUS_FD)
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
		else
			sc->sc_tctl |=
			    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
		CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
		sc->sc_tbi_linkup = 1;
	}

	/* Update the link LED to match the new state. */
	wm_tbi_set_linkled(sc);
}
2553
2554 /*
2555 * wm_gmii_reset:
2556 *
2557 * Reset the PHY.
2558 */
void
wm_gmii_reset(struct wm_softc *sc)
{
	uint32_t reg;

	if (sc->sc_type >= WM_T_CORDOVA) {
		/* Cordova has a dedicated PHY-reset bit in CTRL. */
		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
		delay(20000);

		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
		delay(20000);
	} else {
		/*
		 * Older chips reset the PHY through software-definable
		 * pin 4.  The PHY reset pin is active-low, so drive it
		 * high, pulse it low, then release it high again.
		 */
		reg = CSR_READ(sc, WMREG_CTRL_EXT);
		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
		    CTRL_EXT_SWDPIN(4));
		reg |= CTRL_EXT_SWDPIO(4);	/* pin 4 as output */

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
		delay(10);

		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
		delay(10);
#if 0
		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
#endif
	}
}
2590
2591 /*
2592 * wm_gmii_mediainit:
2593 *
2594 * Initialize media for use on 1000BASE-T devices.
2595 */
void
wm_gmii_mediainit(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/* We have MII. */
	sc->sc_flags |= WM_F_HAS_MII;

	sc->sc_tipg = TIPG_1000T_DFLT;

	/*
	 * Let the chip set speed/duplex on its own based on
	 * signals from the PHY.
	 */
	sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);

	/* Initialize our media structures and probe the GMII. */
	sc->sc_mii.mii_ifp = ifp;

	/*
	 * Cordova-class chips have an MDIC register for PHY access;
	 * older chips bit-bang the MDIO lines through SWD pins.
	 */
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
	} else {
		sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
		sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
	}
	sc->sc_mii.mii_statchg = wm_gmii_statchg;

	/* Reset the PHY before attaching so probing starts clean. */
	wm_gmii_reset(sc);

	ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
	    wm_gmii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found; expose only "none". */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
}
2638
2639 /*
2640 * wm_gmii_mediastatus: [ifmedia interface function]
2641 *
2642 * Get the current interface media status on a 1000BASE-T device.
2643 */
2644 void
2645 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2646 {
2647 struct wm_softc *sc = ifp->if_softc;
2648
2649 mii_pollstat(&sc->sc_mii);
2650 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2651 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2652 }
2653
2654 /*
2655 * wm_gmii_mediachange: [ifmedia interface function]
2656 *
2657 * Set hardware to newly-selected media on a 1000BASE-T device.
2658 */
2659 int
2660 wm_gmii_mediachange(struct ifnet *ifp)
2661 {
2662 struct wm_softc *sc = ifp->if_softc;
2663
2664 if (ifp->if_flags & IFF_UP)
2665 mii_mediachg(&sc->sc_mii);
2666 return (0);
2667 }
2668
/* Software-definable pins used to bit-bang MDIO on pre-Cordova chips. */
#define	MDI_IO		CTRL_SWDPIN(2)
#define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
#define	MDI_CLK		CTRL_SWDPIN(3)

/*
 * Clock out the top `nbits' bits of `data', MSB first, on the MDIO
 * data pin, pulsing the MDIO clock pin for each bit.
 */
static void
livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
{
	uint32_t i, v;

	/* Data and clock low; both pins configured as outputs. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= MDI_DIR | CTRL_SWDPIO(3);

	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
		if (data & i)
			v |= MDI_IO;
		else
			v &= ~MDI_IO;
		/* Present the bit, then pulse the clock high and low. */
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}
}
2695
/*
 * Clock in a 16-bit value from the PHY on the MDIO data pin,
 * MSB first, framed by turnaround clock cycles.
 */
static uint32_t
livengood_mii_recvbits(struct wm_softc *sc)
{
	uint32_t v, i, data = 0;

	/* Data pin as input (MDI_DIR clear), clock pin as output. */
	v = CSR_READ(sc, WMREG_CTRL);
	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
	v |= CTRL_SWDPIO(3);

	/* One turnaround clock cycle before the data bits. */
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	/* Sample 16 data bits on the clock-high phase. */
	for (i = 0; i < 16; i++) {
		data <<= 1;
		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
		delay(10);
		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
			data |= 1;
		CSR_WRITE(sc, WMREG_CTRL, v);
		delay(10);
	}

	/* One trailing idle clock cycle. */
	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
	delay(10);
	CSR_WRITE(sc, WMREG_CTRL, v);
	delay(10);

	return (data);
}

#undef MDI_IO
#undef MDI_DIR
#undef MDI_CLK
2733
2734 /*
2735 * wm_gmii_livengood_readreg: [mii interface function]
2736 *
2737 * Read a PHY register on the GMII (Livengood version).
2738 */
2739 int
2740 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2741 {
2742 struct wm_softc *sc = (void *) self;
2743 int rv;
2744
2745 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2746 livengood_mii_sendbits(sc, reg | (phy << 5) |
2747 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2748 rv = livengood_mii_recvbits(sc) & 0xffff;
2749
2750 DPRINTF(WM_DEBUG_GMII,
2751 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2752 sc->sc_dev.dv_xname, phy, reg, rv));
2753
2754 return (rv);
2755 }
2756
2757 /*
2758 * wm_gmii_livengood_writereg: [mii interface function]
2759 *
2760 * Write a PHY register on the GMII (Livengood version).
2761 */
2762 void
2763 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2764 {
2765 struct wm_softc *sc = (void *) self;
2766
2767 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2768 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2769 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2770 (MII_COMMAND_START << 30), 32);
2771 }
2772
2773 /*
2774 * wm_gmii_cordova_readreg: [mii interface function]
2775 *
2776 * Read a PHY register on the GMII.
2777 */
int
wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i, rv;

	/* Kick off the read through the MDI control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg));

	/* Poll for completion (up to ~1ms). */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	if ((mdic & MDIC_READY) == 0) {
		printf("%s: MDIC read timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
		rv = 0;
	} else if (mdic & MDIC_E) {
#if 0 /* This is normal if no PHY is present. */
		printf("%s: MDIC read error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
#endif
		rv = 0;
	} else {
		rv = MDIC_DATA(mdic);
		/* All-ones reads are treated as "no PHY" and mapped to 0. */
		if (rv == 0xffff)
			rv = 0;
	}

	return (rv);
}
2813
2814 /*
2815 * wm_gmii_cordova_writereg: [mii interface function]
2816 *
2817 * Write a PHY register on the GMII.
2818 */
void
wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
{
	struct wm_softc *sc = (void *) self;
	uint32_t mdic;
	int i;

	/* Kick off the write through the MDI control register. */
	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
	    MDIC_REGADD(reg) | MDIC_DATA(val));

	/* Poll for completion (up to ~1ms). */
	for (i = 0; i < 100; i++) {
		mdic = CSR_READ(sc, WMREG_MDIC);
		if (mdic & MDIC_READY)
			break;
		delay(10);
	}

	/* Failures are logged only; the write is best-effort. */
	if ((mdic & MDIC_READY) == 0)
		printf("%s: MDIC write timed out: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
	else if (mdic & MDIC_E)
		printf("%s: MDIC write error: phy %d reg %d\n",
		    sc->sc_dev.dv_xname, phy, reg);
}
2843
2844 /*
2845 * wm_gmii_statchg: [mii interface function]
2846 *
2847 * Callback from MII layer when media changes.
2848 */
2849 void
2850 wm_gmii_statchg(struct device *self)
2851 {
2852 struct wm_softc *sc = (void *) self;
2853
2854 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2855
2856 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2857 DPRINTF(WM_DEBUG_LINK,
2858 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2859 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2860 } else {
2861 DPRINTF(WM_DEBUG_LINK,
2862 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2863 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2864 }
2865
2866 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2867 }
2868