/*	$NetBSD: if_wm.c,v 1.7 2002/05/08 21:43:10 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
 * and i82544 (``Cordova'') Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *
 *	- Make GMII work on the Livengood.
 *
 *	- Fix out-bound IP header checksums.
 *
 *	- Fix UDP checksums.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 *
 *	...and, of course, performance tuning.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		32
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
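
/*
 * Note: WM_NTXDESC and WM_TXQUEUELEN are powers of two, so the
 * "& MASK" in WM_NEXTTX()/WM_NEXTTXS() is cheap modular ring
 * arithmetic; e.g. WM_NEXTTX(255) == 0 wraps the descriptor index
 * back to the start of the ring.
 */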

/*
 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
 * long as you're transmitting, you don't have to take an interrupt at
 * all.  However, we force an interrupt to happen every N + 1 packets
 * in order to kick us in a reasonable amount of time when we run out
 * of descriptors.
 */
#define	WM_TXINTR_MASK		7

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
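
/*
 * E.g. WM_CDTXOFF(5) is the byte offset of the 6th Tx descriptor
 * within the control-data clump:
 * offsetof(struct wm_control_data, wcd_txdescs) +
 * 5 * sizeof(wiseman_txdesc_t).
 */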

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last Tx int */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
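
/*
 * The tail pointer (sc_rxtailp) always points at the m_next field to
 * fill in next, so WM_RXCHAIN_LINK() appends an mbuf to the chain in
 * constant time without walking the list; WM_RXCHAIN_RESET() re-arms
 * it at sc_rxhead for the next packet.
 */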

/* sc_type */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
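
/*
 * WM_CDTXADDR()/WM_CDRXADDR() yield the device (bus) address of a
 * descriptor: the DMA segment base of the control-data clump plus
 * the structure offset computed by WM_CDTXOFF()/WM_CDRXOFF().
 */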

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
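
/*
 * Example of the wrap case above: with WM_NTXDESC == 256, syncing 10
 * descriptors starting at index 250 issues two bus_dmamap_sync()
 * calls, one covering descriptors 250..255 and one covering
 * descriptors 0..3.
 */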

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
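	/*
	 * Each 16-bit EEPROM word is stored little-endian; e.g. if
	 * myea[0] == 0x1100, the unpacking above yields
	 * enaddr[0] == 0x00 and enaddr[1] == 0x11.
	 */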

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform IPv4, TCPv4, and UDPv4 checksums in-bound,
	 * but only on the Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

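	/*
	 * Note: IPCSS/IPCSO/IPCSE are all offsets from the start of the
	 * packet.  For ordinary Ethernet + IPv4 (offset == 14, iphl == 20)
	 * the IP context above is IPCSS == 14 (checksum from the start of
	 * the IP header), IPCSO == 24 (insert at the ip_sum field), and
	 * IPCSE == 33 (stop at the last byte of the IP header).
	 */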
	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
		sc->sc_txwin++;
	}

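	/*
	 * Every data descriptor of the packet is marked as an extended
	 * data descriptor (DEXT + DTYP_D) so that the chip interprets
	 * the fields byte (WTX_IXSM/WTX_TXSM above) as packet options
	 * against the context descriptor programmed above.
	 */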
	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: no free job descriptors\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
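		/*
		 * (The reserved slot exists because the chip treats
		 * TDH == TDT as an empty ring; if TDT were ever advanced
		 * all the way around to TDH, a completely full ring would
		 * be indistinguishable from an empty one.)
		 */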
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			sc->sc_txwin++;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
		if (sc->sc_txwin < (WM_NTXDESC * 2 / 3))
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_IDE);
		else
			sc->sc_txwin = 0;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
			wm_rxintr(sc);
		}

		if (icr & (ICR_TXDW|ICR_TXQE)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW|TXQE interrupt\n",
			    sc->sc_dev.dv_xname));
#ifdef WM_EVENT_COUNTERS
			if (icr & ICR_TXDW)
				WM_EVCNT_INCR(&sc->sc_ev_txdw);
			else if (icr & ICR_TXQE)
				WM_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
			wm_txintr(sc);
		}

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the Cordova.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
	if (sc->sc_txfree == WM_NTXDESC)
		sc->sc_txwin = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
1735 return;
1736 }
1737
1738 /*
1739 * If we are now receiving /C/, check for link again in
1740 * a couple of link clock ticks.
1741 */
1742 if (icr & ICR_RXCFG) {
1743 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1744 sc->sc_dev.dv_xname));
1745 sc->sc_tbi_anstate = 2;
1746 }
1747
1748 if (icr & ICR_LSC) {
1749 status = CSR_READ(sc, WMREG_STATUS);
1750 if (status & STATUS_LU) {
1751 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1752 sc->sc_dev.dv_xname,
1753 (status & STATUS_FD) ? "FDX" : "HDX"));
1754 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1755 if (status & STATUS_FD)
1756 sc->sc_tctl |=
1757 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1758 else
1759 sc->sc_tctl |=
1760 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1761 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1762 sc->sc_tbi_linkup = 1;
1763 } else {
1764 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1765 sc->sc_dev.dv_xname));
1766 sc->sc_tbi_linkup = 0;
1767 }
1768 sc->sc_tbi_anstate = 2;
1769 wm_tbi_set_linkled(sc);
1770 } else if (icr & ICR_RXSEQ) {
1771 DPRINTF(WM_DEBUG_LINK,
1772 ("%s: LINK: Receive sequence error\n",
1773 sc->sc_dev.dv_xname));
1774 }
1775 }
1776
1777 /*
1778 * wm_tick:
1779 *
1780 * One second timer, used to check link status, sweep up
1781 * completed transmit jobs, etc.
1782 */
1783 void
1784 wm_tick(void *arg)
1785 {
1786 struct wm_softc *sc = arg;
1787 int s;
1788
1789 s = splnet();
1790
1791 if (sc->sc_flags & WM_F_HAS_MII)
1792 mii_tick(&sc->sc_mii);
1793 else
1794 wm_tbi_check_link(sc);
1795
1796 splx(s);
1797
1798 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1799 }
1800
1801 /*
1802 * wm_reset:
1803 *
1804 * Reset the i82542 chip.
1805 */
1806 void
1807 wm_reset(struct wm_softc *sc)
1808 {
1809 int i;
1810
1811 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1812 delay(10000);
1813
1814 for (i = 0; i < 1000; i++) {
1815 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1816 return;
1817 delay(20);
1818 }
1819
1820 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1821 printf("%s: WARNING: reset failed to complete\n",
1822 sc->sc_dev.dv_xname);
1823 }
1824
1825 /*
1826 * wm_init: [ifnet interface function]
1827 *
1828 * Initialize the interface. Must be called at splnet().
1829 */
1830 int
1831 wm_init(struct ifnet *ifp)
1832 {
1833 struct wm_softc *sc = ifp->if_softc;
1834 struct wm_rxsoft *rxs;
1835 int i, error = 0;
1836 uint32_t reg;
1837
1838 /* Cancel any pending I/O. */
1839 wm_stop(ifp, 0);
1840
1841 /* Reset the chip to a known state. */
1842 wm_reset(sc);
1843
1844 /* Initialize the transmit descriptor ring. */
1845 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1846 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1847 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1848 sc->sc_txfree = WM_NTXDESC;
1849 sc->sc_txnext = 0;
1850 sc->sc_txwin = 0;
1851
1852 sc->sc_txctx_ipcs = 0xffffffff;
1853 sc->sc_txctx_tucs = 0xffffffff;
1854
1855 if (sc->sc_type < WM_T_LIVENGOOD) {
1856 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1857 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1858 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1859 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1860 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1861 CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
1862 } else {
1863 CSR_WRITE(sc, WMREG_TBDAH, 0);
1864 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1865 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1866 CSR_WRITE(sc, WMREG_TDH, 0);
1867 CSR_WRITE(sc, WMREG_TDT, 0);
1868 CSR_WRITE(sc, WMREG_TIDV, 64);
1869
1870 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1871 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1872 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1873 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1874 }
1875 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1876 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1877
1878 /* Initialize the transmit job descriptors. */
1879 for (i = 0; i < WM_TXQUEUELEN; i++)
1880 sc->sc_txsoft[i].txs_mbuf = NULL;
1881 sc->sc_txsfree = WM_TXQUEUELEN;
1882 sc->sc_txsnext = 0;
1883 sc->sc_txsdirty = 0;
1884
1885 /*
1886 * Initialize the receive descriptor and receive job
1887 * descriptor rings.
1888 */
1889 if (sc->sc_type < WM_T_LIVENGOOD) {
1890 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1891 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1892 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1893 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1894 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1895 CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);
1896
1897 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1898 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1899 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1900 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1901 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1902 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1903 } else {
1904 CSR_WRITE(sc, WMREG_RDBAH, 0);
1905 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1906 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1907 CSR_WRITE(sc, WMREG_RDH, 0);
1908 CSR_WRITE(sc, WMREG_RDT, 0);
1909 CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
1910 }
1911 for (i = 0; i < WM_NRXDESC; i++) {
1912 rxs = &sc->sc_rxsoft[i];
1913 if (rxs->rxs_mbuf == NULL) {
1914 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1915 printf("%s: unable to allocate or map rx "
1916 "buffer %d, error = %d\n",
1917 sc->sc_dev.dv_xname, i, error);
1918 /*
1919 * XXX Should attempt to run with fewer receive
1920 * XXX buffers instead of just failing.
1921 */
1922 wm_rxdrain(sc);
1923 goto out;
1924 }
1925 } else
1926 WM_INIT_RXDESC(sc, i);
1927 }
1928 sc->sc_rxptr = 0;
1929 sc->sc_rxdiscard = 0;
1930 WM_RXCHAIN_RESET(sc);
1931
1932 /*
1933 * Clear out the VLAN table -- we don't use it (yet).
1934 */
1935 CSR_WRITE(sc, WMREG_VET, 0);
1936 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1937 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1938
1939 /*
1940 * Set up flow-control parameters.
1941 *
1942 * XXX Values could probably stand some tuning.
1943 */
1944 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1945 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1946 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1947 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1948
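		/*
		 * FCAL/FCAH/FCT above hold the destination address and
		 * ethertype of 802.3x PAUSE frames; FCRTH/FCRTL below
		 * set the receive FIFO high and low water marks at
		 * which XOFF and XON frames are sent, and FCTTV is the
		 * pause time advertised in our XOFF frames.
		 */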
1949 if (sc->sc_type < WM_T_LIVENGOOD) {
1950 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1951 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1952 } else {
1953 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1954 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1955 }
1956 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1957 }
1958
1959 #if 0 /* XXXJRT */
1960 /* Deal with VLAN enables. */
1961 if (sc->sc_ethercom.ec_nvlans != 0)
1962 sc->sc_ctrl |= CTRL_VME;
1963 else
1964 #endif /* XXXJRT */
1965 sc->sc_ctrl &= ~CTRL_VME;
1966
1967 /* Write the control registers. */
1968 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1969 #if 0
1970 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1971 #endif
1972
1973 /*
1974 * Set up checksum offload parameters.
1975 */
1976 reg = CSR_READ(sc, WMREG_RXCSUM);
1977 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1978 reg |= RXCSUM_IPOFL;
1979 else
1980 reg &= ~RXCSUM_IPOFL;
1981 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1982 reg |= RXCSUM_TUOFL;
1983 else
1984 reg &= ~RXCSUM_TUOFL;
1985 CSR_WRITE(sc, WMREG_RXCSUM, reg);
1986
1987 /*
1988 * Set up the interrupt registers.
1989 */
1990 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
1991 sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1992 ICR_RXO | ICR_RXT0;
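	/*
	 * In TBI (1000BASE-X) mode there is no MII; link changes
	 * arrive as receive-configuration (RXCFG) events, so watch
	 * for those too.
	 */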
1993 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
1994 sc->sc_icr |= ICR_RXCFG;
1995 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
1996
1997 /* Set up the inter-packet gap. */
1998 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
1999
2000 #if 0 /* XXXJRT */
2001 /* Set the VLAN ethernetype. */
2002 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2003 #endif
2004
2005 /*
2006 * Set up the transmit control register; we start out with
2007 	 * a collision distance suitable for FDX, but update it when
2008 * we resolve the media type.
2009 */
2010 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2011 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2012 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2013
2014 /* Set the media. */
2015 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2016
2017 /*
2018 * Set up the receive control register; we actually program
2019 * the register when we set the receive filter. Use multicast
2020 * address offset type 0.
2021 *
2022 * Only the Cordova has the ability to strip the incoming
2023 * CRC, so we don't enable that feature.
2024 */
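	/*
	 * RCTL_2k selects 2k receive buffers (matching the mbuf
	 * clusters we provide), RCTL_RDMTS_1_2 raises the RXDMT0
	 * interrupt when half the ring remains, and RCTL_DPF discards
	 * inbound pause frames rather than storing them.
	 */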
2025 sc->sc_mchash_type = 0;
2026 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2027 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2028
2029 /* Set the receive filter. */
2030 wm_set_filter(sc);
2031
2032 	/* Start the one-second link check clock. */
2033 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2034
2035 /* ...all done! */
2036 ifp->if_flags |= IFF_RUNNING;
2037 ifp->if_flags &= ~IFF_OACTIVE;
2038
2039 out:
2040 if (error)
2041 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2042 return (error);
2043 }
2044
2045 /*
2046 * wm_rxdrain:
2047 *
2048 * Drain the receive queue.
2049 */
2050 void
2051 wm_rxdrain(struct wm_softc *sc)
2052 {
2053 struct wm_rxsoft *rxs;
2054 int i;
2055
2056 for (i = 0; i < WM_NRXDESC; i++) {
2057 rxs = &sc->sc_rxsoft[i];
2058 if (rxs->rxs_mbuf != NULL) {
2059 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2060 m_freem(rxs->rxs_mbuf);
2061 rxs->rxs_mbuf = NULL;
2062 }
2063 }
2064 }
2065
2066 /*
2067 * wm_stop: [ifnet interface function]
2068 *
2069 * Stop transmission on the interface.
2070 */
2071 void
2072 wm_stop(struct ifnet *ifp, int disable)
2073 {
2074 struct wm_softc *sc = ifp->if_softc;
2075 struct wm_txsoft *txs;
2076 int i;
2077
2078 	/* Stop the one-second clock. */
2079 callout_stop(&sc->sc_tick_ch);
2080
2081 if (sc->sc_flags & WM_F_HAS_MII) {
2082 /* Down the MII. */
2083 mii_down(&sc->sc_mii);
2084 }
2085
2086 /* Stop the transmit and receive processes. */
2087 CSR_WRITE(sc, WMREG_TCTL, 0);
2088 CSR_WRITE(sc, WMREG_RCTL, 0);
2089
2090 /* Release any queued transmit buffers. */
2091 for (i = 0; i < WM_TXQUEUELEN; i++) {
2092 txs = &sc->sc_txsoft[i];
2093 if (txs->txs_mbuf != NULL) {
2094 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2095 m_freem(txs->txs_mbuf);
2096 txs->txs_mbuf = NULL;
2097 }
2098 }
2099
2100 if (disable)
2101 wm_rxdrain(sc);
2102
2103 /* Mark the interface as down and cancel the watchdog timer. */
2104 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2105 ifp->if_timer = 0;
2106 }
2107
2108 /*
2109 * wm_read_eeprom:
2110 *
2111 * Read data from the serial EEPROM.
2112 */
2113 void
2114 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2115 {
2116 uint32_t reg;
2117 int i, x;
2118
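	/*
	 * Bit-bang the Microwire protocol: assert chip select, clock
	 * out the 3-bit READ opcode and the 6-bit word address MSB
	 * first, clock in the 16 data bits, then drop chip select
	 * between words.
	 */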
2119 for (i = 0; i < wordcnt; i++) {
2120 /* Send CHIP SELECT for one clock tick. */
2121 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2122 delay(2);
2123
2124 /* Shift in the READ command. */
2125 for (x = 3; x > 0; x--) {
2126 reg = EECD_CS;
2127 if (UWIRE_OPC_READ & (1 << (x - 1)))
2128 reg |= EECD_DI;
2129 CSR_WRITE(sc, WMREG_EECD, reg);
2130 delay(2);
2131 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2132 delay(2);
2133 CSR_WRITE(sc, WMREG_EECD, reg);
2134 delay(2);
2135 }
2136
2137 /* Shift in address. */
2138 for (x = 6; x > 0; x--) {
2139 reg = EECD_CS;
2140 if ((word + i) & (1 << (x - 1)))
2141 reg |= EECD_DI;
2142 CSR_WRITE(sc, WMREG_EECD, reg);
2143 delay(2);
2144 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2145 delay(2);
2146 CSR_WRITE(sc, WMREG_EECD, reg);
2147 delay(2);
2148 }
2149
2150 /* Shift out the data. */
2151 reg = EECD_CS;
2152 data[i] = 0;
2153 for (x = 16; x > 0; x--) {
2154 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2155 delay(2);
2156 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2157 data[i] |= (1 << (x - 1));
2158 CSR_WRITE(sc, WMREG_EECD, reg);
2159 delay(2);
2160 }
2161
2162 /* Clear CHIP SELECT. */
2163 CSR_WRITE(sc, WMREG_EECD, 0);
2164 }
2165 }
2166
2167 /*
2168 * wm_add_rxbuf:
2169 *
2170  *	Add a receive buffer to the indicated descriptor.
2171 */
2172 int
2173 wm_add_rxbuf(struct wm_softc *sc, int idx)
2174 {
2175 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2176 struct mbuf *m;
2177 int error;
2178
2179 MGETHDR(m, M_DONTWAIT, MT_DATA);
2180 if (m == NULL)
2181 return (ENOBUFS);
2182
2183 MCLGET(m, M_DONTWAIT);
2184 if ((m->m_flags & M_EXT) == 0) {
2185 m_freem(m);
2186 return (ENOBUFS);
2187 }
2188
2189 if (rxs->rxs_mbuf != NULL)
2190 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2191
2192 rxs->rxs_mbuf = m;
2193
2194 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2195 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2196 BUS_DMA_READ|BUS_DMA_NOWAIT);
2197 if (error) {
2198 printf("%s: unable to load rx DMA map %d, error = %d\n",
2199 sc->sc_dev.dv_xname, idx, error);
2200 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2201 }
2202
2203 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2204 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2205
2206 WM_INIT_RXDESC(sc, idx);
2207
2208 return (0);
2209 }
2210
2211 /*
2212 * wm_set_ral:
2213 *
2214  *	Set an entry in the receive address list.
2215 */
2216 static void
2217 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2218 {
2219 uint32_t ral_lo, ral_hi;
2220
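	/*
	 * The receive address registers want the station address in
	 * little-endian order: bytes 0-3 in RAL_LO and bytes 4-5 in
	 * RAL_HI, along with the Address Valid bit.  A NULL enaddr
	 * clears the slot.
	 */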
2221 if (enaddr != NULL) {
2222 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2223 (enaddr[3] << 24);
2224 ral_hi = enaddr[4] | (enaddr[5] << 8);
2225 ral_hi |= RAL_AV;
2226 } else {
2227 ral_lo = 0;
2228 ral_hi = 0;
2229 }
2230
2231 if (sc->sc_type >= WM_T_CORDOVA) {
2232 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2233 ral_lo);
2234 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2235 ral_hi);
2236 } else {
2237 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2238 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2239 }
2240 }
2241
2242 /*
2243 * wm_mchash:
2244 *
2245 * Compute the hash of the multicast address for the 4096-bit
2246 * multicast filter.
2247 */
2248 static uint32_t
2249 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2250 {
2251 static const int lo_shift[4] = { 4, 3, 2, 0 };
2252 static const int hi_shift[4] = { 4, 5, 6, 8 };
2253 uint32_t hash;
2254
2255 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2256 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2257
2258 return (hash & 0xfff);
2259 }
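
/*
 * Example, using the default filter type 0 (lo_shift 4, hi_shift 4):
 * the IPv4 all-hosts group 01:00:5e:00:00:01 has enaddr[4] == 0x00 and
 * enaddr[5] == 0x01, so hash = (0x00 >> 4) | (0x01 << 4) = 0x010, and
 * wm_set_filter() sets bit 16 (0x010 & 0x1f) of MTA word 0
 * (0x010 >> 5).
 */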
2260
2261 /*
2262 * wm_set_filter:
2263 *
2264 * Set up the receive filter.
2265 */
2266 void
2267 wm_set_filter(struct wm_softc *sc)
2268 {
2269 struct ethercom *ec = &sc->sc_ethercom;
2270 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2271 struct ether_multi *enm;
2272 struct ether_multistep step;
2273 bus_addr_t mta_reg;
2274 uint32_t hash, reg, bit;
2275 int i;
2276
2277 if (sc->sc_type >= WM_T_CORDOVA)
2278 mta_reg = WMREG_CORDOVA_MTA;
2279 else
2280 mta_reg = WMREG_MTA;
2281
2282 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2283
2284 if (ifp->if_flags & IFF_BROADCAST)
2285 sc->sc_rctl |= RCTL_BAM;
2286 if (ifp->if_flags & IFF_PROMISC) {
2287 sc->sc_rctl |= RCTL_UPE;
2288 goto allmulti;
2289 }
2290
2291 /*
2292 * Set the station address in the first RAL slot, and
2293 * clear the remaining slots.
2294 */
2295 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2296 for (i = 1; i < WM_RAL_TABSIZE; i++)
2297 wm_set_ral(sc, NULL, i);
2298
2299 /* Clear out the multicast table. */
2300 for (i = 0; i < WM_MC_TABSIZE; i++)
2301 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2302
2303 ETHER_FIRST_MULTI(step, ec, enm);
2304 while (enm != NULL) {
2305 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2306 /*
2307 * We must listen to a range of multicast addresses.
2308 * For now, just accept all multicasts, rather than
2309 * trying to set only those filter bits needed to match
2310 * the range. (At this time, the only use of address
2311 * ranges is for IP multicast routing, for which the
2312 * range is big enough to require all bits set.)
2313 */
2314 goto allmulti;
2315 }
2316
2317 hash = wm_mchash(sc, enm->enm_addrlo);
2318
2319 reg = (hash >> 5) & 0x7f;
2320 bit = hash & 0x1f;
2321
2322 hash = CSR_READ(sc, mta_reg + (reg << 2));
2323 hash |= 1U << bit;
2324
2325 		/* XXX Hardware bug: an odd MTA write clobbers the word below; rewrite the pair. */
2326 		if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2327 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2328 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2329 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2330 } else
2331 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2332
2333 ETHER_NEXT_MULTI(step, enm);
2334 }
2335
2336 ifp->if_flags &= ~IFF_ALLMULTI;
2337 goto setit;
2338
2339 allmulti:
2340 ifp->if_flags |= IFF_ALLMULTI;
2341 sc->sc_rctl |= RCTL_MPE;
2342
2343 setit:
2344 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2345 }
2346
2347 /*
2348 * wm_tbi_mediainit:
2349 *
2350 * Initialize media for use on 1000BASE-X devices.
2351 */
2352 void
2353 wm_tbi_mediainit(struct wm_softc *sc)
2354 {
2355 const char *sep = "";
2356
2357 if (sc->sc_type < WM_T_LIVENGOOD)
2358 sc->sc_tipg = TIPG_WM_DFLT;
2359 else
2360 sc->sc_tipg = TIPG_LG_DFLT;
2361
2362 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2363 wm_tbi_mediastatus);
2364
2365 /*
2366 * SWD Pins:
2367 *
2368 * 0 = Link LED (output)
2369 * 1 = Loss Of Signal (input)
2370 */
2371 sc->sc_ctrl |= CTRL_SWDPIO(0);
2372 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2373
2374 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2375
2376 #define ADD(s, m, d) \
2377 do { \
2378 printf("%s%s", sep, s); \
2379 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2380 sep = ", "; \
2381 } while (/*CONSTCOND*/0)
2382
2383 printf("%s: ", sc->sc_dev.dv_xname);
2384 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2385 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2386 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2387 printf("\n");
2388
2389 #undef ADD
2390
2391 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2392 }
2393
2394 /*
2395 * wm_tbi_mediastatus: [ifmedia interface function]
2396 *
2397 * Get the current interface media status on a 1000BASE-X device.
2398 */
2399 void
2400 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2401 {
2402 struct wm_softc *sc = ifp->if_softc;
2403
2404 ifmr->ifm_status = IFM_AVALID;
2405 ifmr->ifm_active = IFM_ETHER;
2406
2407 if (sc->sc_tbi_linkup == 0) {
2408 ifmr->ifm_active |= IFM_NONE;
2409 return;
2410 }
2411
2412 ifmr->ifm_status |= IFM_ACTIVE;
2413 ifmr->ifm_active |= IFM_1000_SX;
2414 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2415 ifmr->ifm_active |= IFM_FDX;
2416 }
2417
2418 /*
2419 * wm_tbi_mediachange: [ifmedia interface function]
2420 *
2421 * Set hardware to newly-selected media on a 1000BASE-X device.
2422 */
2423 int
2424 wm_tbi_mediachange(struct ifnet *ifp)
2425 {
2426 struct wm_softc *sc = ifp->if_softc;
2427 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2428 uint32_t status;
2429 int i;
2430
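	/*
	 * Build the transmit config word: the ability bits for the
	 * selected medium, pause bits matching our flow-control
	 * settings, and autonegotiation enable.  The delay below gives
	 * the (re)started autonegotiation a moment to begin.
	 */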
2431 sc->sc_txcw = ife->ifm_data;
2432 if (sc->sc_ctrl & CTRL_RFCE)
2433 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2434 if (sc->sc_ctrl & CTRL_TFCE)
2435 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2436 sc->sc_txcw |= TXCW_ANE;
2437
2438 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2439 delay(10000);
2440
2441 sc->sc_tbi_anstate = 0;
2442
2443 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2444 /* Have signal; wait for the link to come up. */
2445 for (i = 0; i < 50; i++) {
2446 delay(10000);
2447 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2448 break;
2449 }
2450
2451 status = CSR_READ(sc, WMREG_STATUS);
2452 if (status & STATUS_LU) {
2453 /* Link is up. */
2454 DPRINTF(WM_DEBUG_LINK,
2455 ("%s: LINK: set media -> link up %s\n",
2456 sc->sc_dev.dv_xname,
2457 (status & STATUS_FD) ? "FDX" : "HDX"));
2458 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2459 if (status & STATUS_FD)
2460 sc->sc_tctl |=
2461 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2462 else
2463 sc->sc_tctl |=
2464 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2465 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2466 sc->sc_tbi_linkup = 1;
2467 } else {
2468 /* Link is down. */
2469 DPRINTF(WM_DEBUG_LINK,
2470 ("%s: LINK: set media -> link down\n",
2471 sc->sc_dev.dv_xname));
2472 sc->sc_tbi_linkup = 0;
2473 }
2474 } else {
2475 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2476 sc->sc_dev.dv_xname));
2477 sc->sc_tbi_linkup = 0;
2478 }
2479
2480 wm_tbi_set_linkled(sc);
2481
2482 return (0);
2483 }
2484
2485 /*
2486 * wm_tbi_set_linkled:
2487 *
2488 * Update the link LED on 1000BASE-X devices.
2489 */
2490 void
2491 wm_tbi_set_linkled(struct wm_softc *sc)
2492 {
2493
2494 if (sc->sc_tbi_linkup)
2495 sc->sc_ctrl |= CTRL_SWDPIN(0);
2496 else
2497 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2498
2499 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2500 }
2501
2502 /*
2503 * wm_tbi_check_link:
2504 *
2505 * Check the link on 1000BASE-X devices.
2506 */
2507 void
2508 wm_tbi_check_link(struct wm_softc *sc)
2509 {
2510 uint32_t rxcw, ctrl, status;
2511
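	/*
	 * sc_tbi_anstate counts down across calls: 0 means no link
	 * check is pending; above 1, autonegotiation is still
	 * settling, so just decrement and wait.
	 */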
2512 if (sc->sc_tbi_anstate == 0)
2513 return;
2514 else if (sc->sc_tbi_anstate > 1) {
2515 DPRINTF(WM_DEBUG_LINK,
2516 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2517 sc->sc_tbi_anstate));
2518 sc->sc_tbi_anstate--;
2519 return;
2520 }
2521
2522 sc->sc_tbi_anstate = 0;
2523
2524 rxcw = CSR_READ(sc, WMREG_RXCW);
2525 ctrl = CSR_READ(sc, WMREG_CTRL);
2526 status = CSR_READ(sc, WMREG_STATUS);
2527
2528 if ((status & STATUS_LU) == 0) {
2529 DPRINTF(WM_DEBUG_LINK,
2530 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2531 sc->sc_tbi_linkup = 0;
2532 } else {
2533 DPRINTF(WM_DEBUG_LINK,
2534 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2535 (status & STATUS_FD) ? "FDX" : "HDX"));
2536 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2537 if (status & STATUS_FD)
2538 sc->sc_tctl |=
2539 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2540 else
2541 sc->sc_tctl |=
2542 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2543 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2544 sc->sc_tbi_linkup = 1;
2545 }
2546
2547 wm_tbi_set_linkled(sc);
2548 }
2549
2550 /*
2551 * wm_gmii_reset:
2552 *
2553 * Reset the PHY.
2554 */
2555 void
2556 wm_gmii_reset(struct wm_softc *sc)
2557 {
2558 uint32_t reg;
2559
2560 if (sc->sc_type >= WM_T_CORDOVA) {
2561 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2562 delay(20000);
2563
2564 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2565 delay(20000);
2566 } else {
2567 /* The PHY reset pin is active-low. */
2568 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2569 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2570 CTRL_EXT_SWDPIN(4));
2571 reg |= CTRL_EXT_SWDPIO(4);
2572
2573 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2574 delay(10);
2575
2576 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2577 delay(10);
2578
2579 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2580 delay(10);
2581 #if 0
2582 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2583 #endif
2584 }
2585 }
2586
2587 /*
2588 * wm_gmii_mediainit:
2589 *
2590 * Initialize media for use on 1000BASE-T devices.
2591 */
2592 void
2593 wm_gmii_mediainit(struct wm_softc *sc)
2594 {
2595 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2596
2597 /* We have MII. */
2598 sc->sc_flags |= WM_F_HAS_MII;
2599
2600 sc->sc_tipg = TIPG_1000T_DFLT;
2601
2602 /*
2603 * Let the chip set speed/duplex on its own based on
2604 * signals from the PHY.
2605 */
2606 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2607 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2608
2609 /* Initialize our media structures and probe the GMII. */
2610 sc->sc_mii.mii_ifp = ifp;
2611
2612 if (sc->sc_type >= WM_T_CORDOVA) {
2613 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2614 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2615 } else {
2616 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2617 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2618 }
2619 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2620
2621 wm_gmii_reset(sc);
2622
2623 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2624 wm_gmii_mediastatus);
2625
2626 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2627 MII_OFFSET_ANY, 0);
2628 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2629 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2630 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2631 } else
2632 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2633 }
2634
2635 /*
2636 * wm_gmii_mediastatus: [ifmedia interface function]
2637 *
2638 * Get the current interface media status on a 1000BASE-T device.
2639 */
2640 void
2641 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2642 {
2643 struct wm_softc *sc = ifp->if_softc;
2644
2645 mii_pollstat(&sc->sc_mii);
2646 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2647 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2648 }
2649
2650 /*
2651 * wm_gmii_mediachange: [ifmedia interface function]
2652 *
2653 * Set hardware to newly-selected media on a 1000BASE-T device.
2654 */
2655 int
2656 wm_gmii_mediachange(struct ifnet *ifp)
2657 {
2658 struct wm_softc *sc = ifp->if_softc;
2659
2660 if (ifp->if_flags & IFF_UP)
2661 mii_mediachg(&sc->sc_mii);
2662 return (0);
2663 }
2664
2665 #define MDI_IO CTRL_SWDPIN(2)
2666 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2667 #define MDI_CLK CTRL_SWDPIN(3)
2668
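
/*
 * Bit-bang MDIO on the software-definable pins: SWDPIN(2) carries the
 * data (SWDPIO(2) sets its direction) and SWDPIN(3) is the management
 * clock.  Each bit is presented on MDI_IO and latched by the PHY on
 * the rising edge of MDI_CLK.
 */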
2669 static void
2670 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2671 {
2672 uint32_t i, v;
2673
2674 v = CSR_READ(sc, WMREG_CTRL);
2675 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2676 v |= MDI_DIR | CTRL_SWDPIO(3);
2677
2678 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2679 if (data & i)
2680 v |= MDI_IO;
2681 else
2682 v &= ~MDI_IO;
2683 CSR_WRITE(sc, WMREG_CTRL, v);
2684 delay(10);
2685 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2686 delay(10);
2687 CSR_WRITE(sc, WMREG_CTRL, v);
2688 delay(10);
2689 }
2690 }
2691
2692 static uint32_t
2693 livengood_mii_recvbits(struct wm_softc *sc)
2694 {
2695 uint32_t v, i, data = 0;
2696
2697 v = CSR_READ(sc, WMREG_CTRL);
2698 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2699 v |= CTRL_SWDPIO(3);
2700
2701 CSR_WRITE(sc, WMREG_CTRL, v);
2702 delay(10);
2703 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2704 delay(10);
2705 CSR_WRITE(sc, WMREG_CTRL, v);
2706 delay(10);
2707
2708 for (i = 0; i < 16; i++) {
2709 data <<= 1;
2710 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2711 delay(10);
2712 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2713 data |= 1;
2714 CSR_WRITE(sc, WMREG_CTRL, v);
2715 delay(10);
2716 }
2717
2718 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2719 delay(10);
2720 CSR_WRITE(sc, WMREG_CTRL, v);
2721 delay(10);
2722
2723 return (data);
2724 }
2725
2726 #undef MDI_IO
2727 #undef MDI_DIR
2728 #undef MDI_CLK
2729
2730 /*
2731 * wm_gmii_livengood_readreg: [mii interface function]
2732 *
2733 * Read a PHY register on the GMII (Livengood version).
2734 */
2735 int
2736 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2737 {
2738 struct wm_softc *sc = (void *) self;
2739 int rv;
2740
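	/*
	 * IEEE 802.3 clause 22 read frame: a 32-bit preamble of ones,
	 * then start, read opcode, PHY address, and register address
	 * (14 bits in all), after which 16 data bits are clocked back
	 * from the PHY.
	 */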
2741 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2742 livengood_mii_sendbits(sc, reg | (phy << 5) |
2743 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2744 rv = livengood_mii_recvbits(sc) & 0xffff;
2745
2746 DPRINTF(WM_DEBUG_GMII,
2747 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2748 sc->sc_dev.dv_xname, phy, reg, rv));
2749
2750 return (rv);
2751 }
2752
2753 /*
2754 * wm_gmii_livengood_writereg: [mii interface function]
2755 *
2756 * Write a PHY register on the GMII (Livengood version).
2757 */
2758 void
2759 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2760 {
2761 struct wm_softc *sc = (void *) self;
2762
2763 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2764 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2765 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2766 (MII_COMMAND_START << 30), 32);
2767 }
2768
2769 /*
2770 * wm_gmii_cordova_readreg: [mii interface function]
2771 *
2772 * Read a PHY register on the GMII.
2773 */
2774 int
2775 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2776 {
2777 struct wm_softc *sc = (void *) self;
2778 uint32_t mdic;
2779 int i, rv;
2780
2781 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2782 MDIC_REGADD(reg));
2783
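	/* Poll up to ~1ms (100 * 10us) for the MDI cycle to complete. */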
2784 for (i = 0; i < 100; i++) {
2785 mdic = CSR_READ(sc, WMREG_MDIC);
2786 if (mdic & MDIC_READY)
2787 break;
2788 delay(10);
2789 }
2790
2791 if ((mdic & MDIC_READY) == 0) {
2792 printf("%s: MDIC read timed out: phy %d reg %d\n",
2793 sc->sc_dev.dv_xname, phy, reg);
2794 rv = 0;
2795 } else if (mdic & MDIC_E) {
2796 #if 0 /* This is normal if no PHY is present. */
2797 printf("%s: MDIC read error: phy %d reg %d\n",
2798 sc->sc_dev.dv_xname, phy, reg);
2799 #endif
2800 rv = 0;
2801 } else {
2802 rv = MDIC_DATA(mdic);
2803 if (rv == 0xffff)
2804 rv = 0;
2805 }
2806
2807 return (rv);
2808 }
2809
2810 /*
2811 * wm_gmii_cordova_writereg: [mii interface function]
2812 *
2813 * Write a PHY register on the GMII.
2814 */
2815 void
2816 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2817 {
2818 struct wm_softc *sc = (void *) self;
2819 uint32_t mdic;
2820 int i;
2821
2822 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2823 MDIC_REGADD(reg) | MDIC_DATA(val));
2824
2825 for (i = 0; i < 100; i++) {
2826 mdic = CSR_READ(sc, WMREG_MDIC);
2827 if (mdic & MDIC_READY)
2828 break;
2829 delay(10);
2830 }
2831
2832 if ((mdic & MDIC_READY) == 0)
2833 printf("%s: MDIC write timed out: phy %d reg %d\n",
2834 sc->sc_dev.dv_xname, phy, reg);
2835 else if (mdic & MDIC_E)
2836 printf("%s: MDIC write error: phy %d reg %d\n",
2837 sc->sc_dev.dv_xname, phy, reg);
2838 }
2839
2840 /*
2841 * wm_gmii_statchg: [mii interface function]
2842 *
2843 * Callback from MII layer when media changes.
2844 */
2845 void
2846 wm_gmii_statchg(struct device *self)
2847 {
2848 struct wm_softc *sc = (void *) self;
2849
2850 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2851
2852 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2853 DPRINTF(WM_DEBUG_LINK,
2854 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2855 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2856 } else {
2857 DPRINTF(WM_DEBUG_LINK,
2858 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2859 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2860 }
2861
2862 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2863 }
2864