/*	$NetBSD: if_wm.c,v 1.5 2002/05/08 19:00:27 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
 * and i82544 (``Cordova'') Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *
 *	- Make GMII work on the Livengood.
 *
 *	- Fix out-bound IP header checksums.
 *
 *	- Fix UDP checksums.
 *
 *	- Jumbo frames -- requires changes to the network stack due to
 *	  lame buffer length handling on the chip.
 *
 * ...and, of course, performance tuning.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		32
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
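
/*
 * Illustrative note (not in the original source): because WM_NTXDESC
 * and WM_TXQUEUELEN are powers of two, ring indices can wrap with a
 * simple mask instead of a modulus, e.g.:
 *
 *	WM_NEXTTX(254) == 255
 *	WM_NEXTTX(255) == 0	(wraps around the 256-entry ring)
 *	WM_NEXTTXS(31) == 0	(wraps around the 32-entry job queue)
 */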

/*
 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
 * long as you're transmitting, you don't have to take an interrupt at
 * all.  However, we force an interrupt to happen every N + 1 packets
 * in order to kick us in a reasonable amount of time when we run out
 * of descriptors.
 */
#define	WM_TXINTR_MASK		7
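
/*
 * Illustrative sketch (an assumption, not from the original source):
 * with WM_TXINTR_MASK == 7, the intended use would be to request a Tx
 * interrupt whenever the low bits of the packet count hit zero, i.e.
 * once every 8 packets, roughly:
 *
 *	if ((sc->sc_txsnext & WM_TXINTR_MASK) == 0)
 *		cmdlen |= htole32(WTX_CMD_RS);	(hypothetical use)
 *
 * The transmit path below actually gates interrupt delay on sc_txwin
 * instead; the mask is kept here for reference.
 */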

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
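
/*
 * Illustrative note (not in the original source): these macros yield
 * the byte offset of an individual descriptor within the control-data
 * clump, e.g. WM_CDTXOFF(4) == 4 * sizeof(wiseman_txdesc_t), and
 * WM_CDRXOFF(0) == WM_NTXDESC * sizeof(wiseman_txdesc_t), since the
 * Rx ring sits immediately after the Tx ring in struct wm_control_data.
 */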

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last Tx int */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_tcmd;		/* cached Tx cksum cmd */
	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum start */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum start */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
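
/*
 * Illustrative note (not in the original source): this is the classic
 * tail-pointer idiom for appending to a singly-linked mbuf chain in
 * O(1).  sc_rxtailp always points at the m_next slot to fill, so after
 *
 *	WM_RXCHAIN_RESET(sc);
 *	WM_RXCHAIN_LINK(sc, m1);
 *	WM_RXCHAIN_LINK(sc, m2);
 *
 * sc_rxhead is m1, m1->m_next is m2, and sc_rxtailp is &m2->m_next,
 * ready for the next buffer of a multi-buffer (e.g. jumbo) packet.
 */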

/* sc_type */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
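
/*
 * Worked example (illustrative, not from the original source): syncing
 * 10 descriptors starting at index 250 with WM_NTXDESC == 256 wraps, so
 * the macro issues two bus_dmamap_sync() calls:
 *
 *	1. descriptors 250..255 (WM_NTXDESC - 250 == 6 of them), then
 *	2. descriptors 0..3 (the remaining __n == 4),
 *
 * which together cover the same range a single call would if the ring
 * were contiguous in index space.
 */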

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
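
/*
 * Illustrative note (not in the original source): the 2-byte "scoot"
 * works because ETHER_HDR_LEN is 14.  With the frame starting at
 * ext_buf + 2, the IP header lands at offset 2 + 14 == 16 from the
 * cluster-aligned buffer start, i.e. on a 4-byte boundary, which is
 * what the layer 3/4 code expects on strict-alignment CPUs.
 */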

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
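
	/*
	 * Illustrative example (not in the original source): the EEPROM
	 * stores the MAC address as three little-endian 16-bit words, so
	 * for the address 00:11:22:33:44:55, myea[] reads
	 * { 0x1100, 0x3322, 0x5544 } and the unpacking above recovers
	 * the bytes in wire order.
	 */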

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound, but only
	 * on the Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, tcmd = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		tcmd |= htole32(WTX_TCPIP_CMD_IP);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		tcmd |= htole32(WTX_TCPIP_CMD_TCP);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs &&
	    sc->sc_txctx_tcmd == tcmd) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff &&
		    sc->sc_txctx_tcmd == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_CMD_IDE | WTX_DTYP_C) | tcmd;
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;
		sc->sc_txctx_tcmd = tcmd;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
		sc->sc_txwin++;
	}

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}
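
/*
 * Worked example (illustrative, not from the original source): for an
 * IPv4/TCP packet with a standard 14-byte Ethernet header and a 20-byte
 * IP header (ip_hl == 5), the context descriptor fields come out as:
 *
 *	IPCSS = 14		(IP checksum starts after the MAC header)
 *	IPCSO = 14 + 10 = 24	(ip_sum lives 10 bytes into struct ip)
 *	IPCSE = 14 + 20 - 1 = 33
 *	TUCSS = 34		(TCP header follows the IP header)
 *	TUCSO = 34 + 16 = 50	(th_sum offset; csum_data == 16 for TCP)
 *	TUCSE = 0		(checksum to the end of the packet)
 */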

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: no free job descriptors\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load the checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by 1 if we do checksum offload (a
		 * descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			sc->sc_txwin++;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RPS);
		if (sc->sc_txwin < (WM_NTXDESC * 2 / 3))
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_IDE);
		else
			sc->sc_txwin = 0;
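
		/*
		 * Illustrative note (not in the original source): with
		 * WM_NTXDESC == 256, the delay window above is
		 * (256 * 2) / 3 == 170 descriptors.  While fewer than
		 * 170 descriptors have been queued since the last Tx
		 * interrupt, WTX_CMD_IDE lets the chip delay the
		 * interrupt; past that point we omit it so an interrupt
		 * arrives before the ring can fill completely.
		 */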

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		/*
		 * Remember what txdirty will be once the packet is done.
		 *
		 * Note: If we're doing checksum offload, we are actually
		 * using one descriptor before firstdesc, but it doesn't
		 * really matter.
		 */
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
			wm_rxintr(sc);
		}

		if (icr & (ICR_TXDW|ICR_TXQE)) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW|TXQE interrupt\n",
			    sc->sc_dev.dv_xname));
#ifdef WM_EVENT_COUNTERS
			if (icr & ICR_TXDW)
				WM_EVCNT_INCR(&sc->sc_ev_txdw);
			else if (icr & ICR_TXQE)
				WM_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
			wm_txintr(sc);
		}

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the Cordova.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
	if (sc->sc_txfree == WM_NTXDESC)
		sc->sc_txwin = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
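
		/*
		 * Illustrative note (not in the original source): M_HASFCS
		 * tells the upper layer that the 4-byte Ethernet CRC is
		 * still attached, so ether_input() will trim it rather
		 * than treating it as payload.
		 */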

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}
1836
1837 /*
1838 * wm_init: [ifnet interface function]
1839 *
1840 * Initialize the interface. Must be called at splnet().
1841 */
1842 int
1843 wm_init(struct ifnet *ifp)
1844 {
1845 struct wm_softc *sc = ifp->if_softc;
1846 struct wm_rxsoft *rxs;
1847 int i, error = 0;
1848 uint32_t reg;
1849
1850 /* Cancel any pending I/O. */
1851 wm_stop(ifp, 0);
1852
1853 /* Reset the chip to a known state. */
1854 wm_reset(sc);
1855
1856 /* Initialize the transmit descriptor ring. */
1857 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1858 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1859 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1860 sc->sc_txfree = WM_NTXDESC;
1861 sc->sc_txnext = 0;
1862 sc->sc_txwin = 0;
1863
1864 sc->sc_txctx_tcmd = 0xffffffff;
1865 sc->sc_txctx_ipcs = 0xffffffff;
1866 sc->sc_txctx_tucs = 0xffffffff;
1867
1868 if (sc->sc_type < WM_T_LIVENGOOD) {
1869 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1870 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1871 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1872 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1873 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1874 CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
1875 } else {
1876 CSR_WRITE(sc, WMREG_TBDAH, 0);
1877 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1878 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1879 CSR_WRITE(sc, WMREG_TDH, 0);
1880 CSR_WRITE(sc, WMREG_TDT, 0);
1881 CSR_WRITE(sc, WMREG_TIDV, 64);
1882
1883 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1884 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1885 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1886 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1887 }
1888 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1889 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1890
1891 /* Initialize the transmit job descriptors. */
1892 for (i = 0; i < WM_TXQUEUELEN; i++)
1893 sc->sc_txsoft[i].txs_mbuf = NULL;
1894 sc->sc_txsfree = WM_TXQUEUELEN;
1895 sc->sc_txsnext = 0;
1896 sc->sc_txsdirty = 0;
1897
1898 /*
1899 * Initialize the receive descriptor and receive job
1900 * descriptor rings.
1901 */
1902 if (sc->sc_type < WM_T_LIVENGOOD) {
1903 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1904 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1905 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1906 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1907 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1908 CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);
1909
1910 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1911 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1912 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1913 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1914 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1915 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1916 } else {
1917 CSR_WRITE(sc, WMREG_RDBAH, 0);
1918 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1919 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1920 CSR_WRITE(sc, WMREG_RDH, 0);
1921 CSR_WRITE(sc, WMREG_RDT, 0);
1922 CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
1923 }
1924 for (i = 0; i < WM_NRXDESC; i++) {
1925 rxs = &sc->sc_rxsoft[i];
1926 if (rxs->rxs_mbuf == NULL) {
1927 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1928 printf("%s: unable to allocate or map rx "
1929 "buffer %d, error = %d\n",
1930 sc->sc_dev.dv_xname, i, error);
1931 /*
1932 * XXX Should attempt to run with fewer receive
1933 * XXX buffers instead of just failing.
1934 */
1935 wm_rxdrain(sc);
1936 goto out;
1937 }
1938 } else
1939 WM_INIT_RXDESC(sc, i);
1940 }
1941 sc->sc_rxptr = 0;
1942 sc->sc_rxdiscard = 0;
1943 WM_RXCHAIN_RESET(sc);
1944
1945 /*
1946 * Clear out the VLAN table -- we don't use it (yet).
1947 */
1948 CSR_WRITE(sc, WMREG_VET, 0);
1949 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1950 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1951
1952 /*
1953 * Set up flow-control parameters.
1954 *
1955 * XXX Values could probably stand some tuning.
1956 */
1957 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1958 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1959 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1960 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1961
1962 if (sc->sc_type < WM_T_LIVENGOOD) {
1963 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1964 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1965 } else {
1966 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1967 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1968 }
1969 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1970 }
1971
1972 #if 0 /* XXXJRT */
1973 /* Deal with VLAN enables. */
1974 if (sc->sc_ethercom.ec_nvlans != 0)
1975 sc->sc_ctrl |= CTRL_VME;
1976 else
1977 #endif /* XXXJRT */
1978 sc->sc_ctrl &= ~CTRL_VME;
1979
1980 /* Write the control registers. */
1981 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1982 #if 0
1983 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1984 #endif
1985
1986 /*
1987 * Set up checksum offload parameters.
1988 */
1989 reg = CSR_READ(sc, WMREG_RXCSUM);
1990 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1991 reg |= RXCSUM_IPOFL;
1992 else
1993 reg &= ~RXCSUM_IPOFL;
1994 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1995 reg |= RXCSUM_TUOFL;
1996 else
1997 reg &= ~RXCSUM_TUOFL;
1998 CSR_WRITE(sc, WMREG_RXCSUM, reg);
1999
2000 /*
2001 * Set up the interrupt registers.
2002 */
2003 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
2004 sc->sc_icr = ICR_TXDW | ICR_TXQE | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
2005 ICR_RXO | ICR_RXT0;
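	/*
	 * ICR_RXCFG (receipt of /C/ autonegotiation ordered sets) is
	 * only meaningful on the fibre (TBI) parts, i.e. when there
	 * is no MII.
	 */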
2006 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
2007 sc->sc_icr |= ICR_RXCFG;
2008 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
2009
2010 /* Set up the inter-packet gap. */
2011 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
2012
2013 #if 0 /* XXXJRT */
2014 /* Set the VLAN ethernetype. */
2015 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
2016 #endif
2017
2018 /*
2019 * Set up the transmit control register; we start out with
2020  * a collision distance suitable for FDX, but update it when
2021 * we resolve the media type.
2022 */
2023 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
2024 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2025 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2026
2027 /* Set the media. */
2028 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2029
2030 /*
2031 * Set up the receive control register; we actually program
2032 * the register when we set the receive filter. Use multicast
2033 * address offset type 0.
2034 *
2035 * Only the Cordova has the ability to strip the incoming
2036 * CRC, so we don't enable that feature.
2037 */
2038 sc->sc_mchash_type = 0;
2039 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2040 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2041
2042 /* Set the receive filter. */
2043 wm_set_filter(sc);
2044
2045 /* Start the one second link check clock. */
2046 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2047
2048 /* ...all done! */
2049 ifp->if_flags |= IFF_RUNNING;
2050 ifp->if_flags &= ~IFF_OACTIVE;
2051
2052 out:
2053 if (error)
2054 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2055 return (error);
2056 }
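
/*
 * Illustrative only: wm_init() assumes the caller is already at
 * splnet(), so a hypothetical caller would bracket it like this:
 *
 *	int s = splnet();
 *	error = wm_init(ifp);
 *	splx(s);
 *
 * The function never raises the IPL itself.
 */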
2057
2058 /*
2059 * wm_rxdrain:
2060 *
2061 * Drain the receive queue.
2062 */
2063 void
2064 wm_rxdrain(struct wm_softc *sc)
2065 {
2066 struct wm_rxsoft *rxs;
2067 int i;
2068
2069 for (i = 0; i < WM_NRXDESC; i++) {
2070 rxs = &sc->sc_rxsoft[i];
2071 if (rxs->rxs_mbuf != NULL) {
2072 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2073 m_freem(rxs->rxs_mbuf);
2074 rxs->rxs_mbuf = NULL;
2075 }
2076 }
2077 }
2078
2079 /*
2080 * wm_stop: [ifnet interface function]
2081 *
2082 * Stop transmission on the interface.
2083 */
2084 void
2085 wm_stop(struct ifnet *ifp, int disable)
2086 {
2087 struct wm_softc *sc = ifp->if_softc;
2088 struct wm_txsoft *txs;
2089 int i;
2090
2091 /* Stop the one second clock. */
2092 callout_stop(&sc->sc_tick_ch);
2093
2094 if (sc->sc_flags & WM_F_HAS_MII) {
2095 /* Down the MII. */
2096 mii_down(&sc->sc_mii);
2097 }
2098
2099 /* Stop the transmit and receive processes. */
2100 CSR_WRITE(sc, WMREG_TCTL, 0);
2101 CSR_WRITE(sc, WMREG_RCTL, 0);
2102
2103 /* Release any queued transmit buffers. */
2104 for (i = 0; i < WM_TXQUEUELEN; i++) {
2105 txs = &sc->sc_txsoft[i];
2106 if (txs->txs_mbuf != NULL) {
2107 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2108 m_freem(txs->txs_mbuf);
2109 txs->txs_mbuf = NULL;
2110 }
2111 }
2112
2113 if (disable)
2114 wm_rxdrain(sc);
2115
2116 /* Mark the interface as down and cancel the watchdog timer. */
2117 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2118 ifp->if_timer = 0;
2119 }
2120
2121 /*
2122 * wm_read_eeprom:
2123 *
2124 * Read data from the serial EEPROM.
2125 */
2126 void
2127 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2128 {
2129 uint32_t reg;
2130 int i, x;
2131
2132 for (i = 0; i < wordcnt; i++) {
2133 /* Send CHIP SELECT for one clock tick. */
2134 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2135 delay(2);
2136
2137 /* Shift in the READ command. */
2138 for (x = 3; x > 0; x--) {
2139 reg = EECD_CS;
2140 if (UWIRE_OPC_READ & (1 << (x - 1)))
2141 reg |= EECD_DI;
2142 CSR_WRITE(sc, WMREG_EECD, reg);
2143 delay(2);
2144 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2145 delay(2);
2146 CSR_WRITE(sc, WMREG_EECD, reg);
2147 delay(2);
2148 }
2149
2150 /* Shift in address. */
2151 for (x = 6; x > 0; x--) {
2152 reg = EECD_CS;
2153 if ((word + i) & (1 << (x - 1)))
2154 reg |= EECD_DI;
2155 CSR_WRITE(sc, WMREG_EECD, reg);
2156 delay(2);
2157 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2158 delay(2);
2159 CSR_WRITE(sc, WMREG_EECD, reg);
2160 delay(2);
2161 }
2162
2163 /* Shift out the data. */
2164 reg = EECD_CS;
2165 data[i] = 0;
2166 for (x = 16; x > 0; x--) {
2167 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2168 delay(2);
2169 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2170 data[i] |= (1 << (x - 1));
2171 CSR_WRITE(sc, WMREG_EECD, reg);
2172 delay(2);
2173 }
2174
2175 /* Clear CHIP SELECT. */
2176 CSR_WRITE(sc, WMREG_EECD, 0);
2177 }
2178 }
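
/*
 * For reference, a sketch of the microwire framing clocked out above
 * (assuming the conventional 3-bit READ opcode, 110, and a 6-bit word
 * address): reading word 2 shifts out, MSB first,
 *
 *	1 1 0			READ opcode (UWIRE_OPC_READ)
 *	0 0 0 0 1 0		word address (2)
 *
 * and then samples 16 data bits on EECD_DO, again MSB first.
 */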
2179
2180 /*
2181 * wm_add_rxbuf:
2182 *
2183  *	Add a receive buffer to the indicated descriptor.
2184 */
2185 int
2186 wm_add_rxbuf(struct wm_softc *sc, int idx)
2187 {
2188 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2189 struct mbuf *m;
2190 int error;
2191
2192 MGETHDR(m, M_DONTWAIT, MT_DATA);
2193 if (m == NULL)
2194 return (ENOBUFS);
2195
2196 MCLGET(m, M_DONTWAIT);
2197 if ((m->m_flags & M_EXT) == 0) {
2198 m_freem(m);
2199 return (ENOBUFS);
2200 }
2201
2202 if (rxs->rxs_mbuf != NULL)
2203 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2204
2205 rxs->rxs_mbuf = m;
2206
2207 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2208 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2209 BUS_DMA_READ|BUS_DMA_NOWAIT);
2210 if (error) {
2211 printf("%s: unable to load rx DMA map %d, error = %d\n",
2212 sc->sc_dev.dv_xname, idx, error);
2213 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2214 }
2215
2216 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2217 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2218
2219 WM_INIT_RXDESC(sc, idx);
2220
2221 return (0);
2222 }
2223
2224 /*
2225 * wm_set_ral:
2226 *
2227  *	Set an entry in the receive address list.
2228 */
2229 static void
2230 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2231 {
2232 uint32_t ral_lo, ral_hi;
2233
2234 if (enaddr != NULL) {
2235 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2236 (enaddr[3] << 24);
2237 ral_hi = enaddr[4] | (enaddr[5] << 8);
2238 ral_hi |= RAL_AV;
2239 } else {
2240 ral_lo = 0;
2241 ral_hi = 0;
2242 }
2243
2244 if (sc->sc_type >= WM_T_CORDOVA) {
2245 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2246 ral_lo);
2247 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2248 ral_hi);
2249 } else {
2250 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2251 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2252 }
2253 }
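
/*
 * A worked example of the packing above, for the (made-up) station
 * address 00:a0:c9:12:34:56:
 *
 *	ral_lo = 0x12c9a000		bytes 0-3, byte 0 in the low octet
 *	ral_hi = 0x00005634 | RAL_AV	bytes 4-5, plus the valid bit
 */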
2254
2255 /*
2256 * wm_mchash:
2257 *
2258 * Compute the hash of the multicast address for the 4096-bit
2259 * multicast filter.
2260 */
2261 static uint32_t
2262 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2263 {
2264 static const int lo_shift[4] = { 4, 3, 2, 0 };
2265 static const int hi_shift[4] = { 4, 5, 6, 8 };
2266 uint32_t hash;
2267
2268 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2269 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2270
2271 return (hash & 0xfff);
2272 }
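
/*
 * A worked example for filter type 0 (the only type the driver
 * currently uses): for the multicast address 01:00:5e:00:00:01,
 * enaddr[4] = 0x00 and enaddr[5] = 0x01, so
 *
 *	hash = (0x00 >> 4) | (0x01 << 4) = 0x010
 *
 * which wm_set_filter() below turns into MTA register 0 (hash >> 5)
 * and bit 16 (hash & 0x1f).
 */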
2273
2274 /*
2275 * wm_set_filter:
2276 *
2277 * Set up the receive filter.
2278 */
2279 void
2280 wm_set_filter(struct wm_softc *sc)
2281 {
2282 struct ethercom *ec = &sc->sc_ethercom;
2283 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2284 struct ether_multi *enm;
2285 struct ether_multistep step;
2286 bus_addr_t mta_reg;
2287 uint32_t hash, reg, bit;
2288 int i;
2289
2290 if (sc->sc_type >= WM_T_CORDOVA)
2291 mta_reg = WMREG_CORDOVA_MTA;
2292 else
2293 mta_reg = WMREG_MTA;
2294
2295 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2296
2297 if (ifp->if_flags & IFF_BROADCAST)
2298 sc->sc_rctl |= RCTL_BAM;
2299 if (ifp->if_flags & IFF_PROMISC) {
2300 sc->sc_rctl |= RCTL_UPE;
2301 goto allmulti;
2302 }
2303
2304 /*
2305 * Set the station address in the first RAL slot, and
2306 * clear the remaining slots.
2307 */
2308 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2309 for (i = 1; i < WM_RAL_TABSIZE; i++)
2310 wm_set_ral(sc, NULL, i);
2311
2312 /* Clear out the multicast table. */
2313 for (i = 0; i < WM_MC_TABSIZE; i++)
2314 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2315
2316 ETHER_FIRST_MULTI(step, ec, enm);
2317 while (enm != NULL) {
2318 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2319 /*
2320 * We must listen to a range of multicast addresses.
2321 * For now, just accept all multicasts, rather than
2322 * trying to set only those filter bits needed to match
2323 * the range. (At this time, the only use of address
2324 * ranges is for IP multicast routing, for which the
2325 * range is big enough to require all bits set.)
2326 */
2327 goto allmulti;
2328 }
2329
2330 hash = wm_mchash(sc, enm->enm_addrlo);
2331
2332 reg = (hash >> 5) & 0x7f;
2333 bit = hash & 0x1f;
2334
2335 hash = CSR_READ(sc, mta_reg + (reg << 2));
2336 hash |= 1U << bit;
2337
2338 		/* XXX Cordova bug: writing an odd MTA offset can clobber the previous word. */
2339 		if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2340 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2341 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2342 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2343 } else
2344 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2345
2346 ETHER_NEXT_MULTI(step, enm);
2347 }
2348
2349 ifp->if_flags &= ~IFF_ALLMULTI;
2350 goto setit;
2351
2352 allmulti:
2353 ifp->if_flags |= IFF_ALLMULTI;
2354 sc->sc_rctl |= RCTL_MPE;
2355
2356 setit:
2357 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2358 }
2359
2360 /*
2361 * wm_tbi_mediainit:
2362 *
2363 * Initialize media for use on 1000BASE-X devices.
2364 */
2365 void
2366 wm_tbi_mediainit(struct wm_softc *sc)
2367 {
2368 const char *sep = "";
2369
2370 if (sc->sc_type < WM_T_LIVENGOOD)
2371 sc->sc_tipg = TIPG_WM_DFLT;
2372 else
2373 sc->sc_tipg = TIPG_LG_DFLT;
2374
2375 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2376 wm_tbi_mediastatus);
2377
2378 /*
2379 * SWD Pins:
2380 *
2381 * 0 = Link LED (output)
2382 * 1 = Loss Of Signal (input)
2383 */
2384 sc->sc_ctrl |= CTRL_SWDPIO(0);
2385 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2386
2387 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2388
2389 #define ADD(s, m, d) \
2390 do { \
2391 printf("%s%s", sep, s); \
2392 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2393 sep = ", "; \
2394 } while (/*CONSTCOND*/0)
2395
2396 printf("%s: ", sc->sc_dev.dv_xname);
2397 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2398 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2399 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2400 printf("\n");
2401
2402 #undef ADD
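
	/*
	 * The attach-time output above reads something like
	 * "wm0: 1000baseSX, 1000baseSX-FDX, auto" (name and unit vary).
	 */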
2403
2404 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2405 }
2406
2407 /*
2408 * wm_tbi_mediastatus: [ifmedia interface function]
2409 *
2410 * Get the current interface media status on a 1000BASE-X device.
2411 */
2412 void
2413 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2414 {
2415 struct wm_softc *sc = ifp->if_softc;
2416
2417 ifmr->ifm_status = IFM_AVALID;
2418 ifmr->ifm_active = IFM_ETHER;
2419
2420 if (sc->sc_tbi_linkup == 0) {
2421 ifmr->ifm_active |= IFM_NONE;
2422 return;
2423 }
2424
2425 ifmr->ifm_status |= IFM_ACTIVE;
2426 ifmr->ifm_active |= IFM_1000_SX;
2427 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2428 ifmr->ifm_active |= IFM_FDX;
2429 }
2430
2431 /*
2432 * wm_tbi_mediachange: [ifmedia interface function]
2433 *
2434 * Set hardware to newly-selected media on a 1000BASE-X device.
2435 */
2436 int
2437 wm_tbi_mediachange(struct ifnet *ifp)
2438 {
2439 struct wm_softc *sc = ifp->if_softc;
2440 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2441 uint32_t status;
2442 int i;
2443
2444 sc->sc_txcw = ife->ifm_data;
2445 if (sc->sc_ctrl & CTRL_RFCE)
2446 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2447 if (sc->sc_ctrl & CTRL_TFCE)
2448 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2449 sc->sc_txcw |= TXCW_ANE;
2450
2451 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2452 delay(10000);
2453
2454 sc->sc_tbi_anstate = 0;
2455
2456 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2457 		/* Have signal; wait up to 500 ms (50 * 10 ms) for the link. */
2458 for (i = 0; i < 50; i++) {
2459 delay(10000);
2460 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2461 break;
2462 }
2463
2464 status = CSR_READ(sc, WMREG_STATUS);
2465 if (status & STATUS_LU) {
2466 /* Link is up. */
2467 DPRINTF(WM_DEBUG_LINK,
2468 ("%s: LINK: set media -> link up %s\n",
2469 sc->sc_dev.dv_xname,
2470 (status & STATUS_FD) ? "FDX" : "HDX"));
2471 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2472 if (status & STATUS_FD)
2473 sc->sc_tctl |=
2474 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2475 else
2476 sc->sc_tctl |=
2477 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2478 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2479 sc->sc_tbi_linkup = 1;
2480 } else {
2481 /* Link is down. */
2482 DPRINTF(WM_DEBUG_LINK,
2483 ("%s: LINK: set media -> link down\n",
2484 sc->sc_dev.dv_xname));
2485 sc->sc_tbi_linkup = 0;
2486 }
2487 } else {
2488 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2489 sc->sc_dev.dv_xname));
2490 sc->sc_tbi_linkup = 0;
2491 }
2492
2493 wm_tbi_set_linkled(sc);
2494
2495 return (0);
2496 }
2497
2498 /*
2499 * wm_tbi_set_linkled:
2500 *
2501 * Update the link LED on 1000BASE-X devices.
2502 */
2503 void
2504 wm_tbi_set_linkled(struct wm_softc *sc)
2505 {
2506
2507 if (sc->sc_tbi_linkup)
2508 sc->sc_ctrl |= CTRL_SWDPIN(0);
2509 else
2510 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2511
2512 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2513 }
2514
2515 /*
2516 * wm_tbi_check_link:
2517 *
2518 * Check the link on 1000BASE-X devices.
2519 */
2520 void
2521 wm_tbi_check_link(struct wm_softc *sc)
2522 {
2523 uint32_t rxcw, ctrl, status;
2524
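	/*
	 * sc_tbi_anstate, when armed (presumably by the interrupt path
	 * on an autonegotiation event), counts down calls to this
	 * function so the link is not re-examined until autonegotiation
	 * has had time to settle; 0 means no check is pending.
	 */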
2525 if (sc->sc_tbi_anstate == 0)
2526 return;
2527 else if (sc->sc_tbi_anstate > 1) {
2528 DPRINTF(WM_DEBUG_LINK,
2529 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2530 sc->sc_tbi_anstate));
2531 sc->sc_tbi_anstate--;
2532 return;
2533 }
2534
2535 sc->sc_tbi_anstate = 0;
2536
2537 rxcw = CSR_READ(sc, WMREG_RXCW);
2538 ctrl = CSR_READ(sc, WMREG_CTRL);
2539 status = CSR_READ(sc, WMREG_STATUS);
2540
2541 if ((status & STATUS_LU) == 0) {
2542 DPRINTF(WM_DEBUG_LINK,
2543 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2544 sc->sc_tbi_linkup = 0;
2545 } else {
2546 DPRINTF(WM_DEBUG_LINK,
2547 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2548 (status & STATUS_FD) ? "FDX" : "HDX"));
2549 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2550 if (status & STATUS_FD)
2551 sc->sc_tctl |=
2552 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2553 else
2554 sc->sc_tctl |=
2555 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2556 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2557 sc->sc_tbi_linkup = 1;
2558 }
2559
2560 wm_tbi_set_linkled(sc);
2561 }
2562
2563 /*
2564 * wm_gmii_reset:
2565 *
2566 * Reset the PHY.
2567 */
2568 void
2569 wm_gmii_reset(struct wm_softc *sc)
2570 {
2571 uint32_t reg;
2572
2573 if (sc->sc_type >= WM_T_CORDOVA) {
2574 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2575 delay(20000);
2576
2577 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2578 delay(20000);
2579 } else {
2580 /* The PHY reset pin is active-low. */
2581 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2582 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2583 CTRL_EXT_SWDPIN(4));
2584 reg |= CTRL_EXT_SWDPIO(4);
2585
2586 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2587 delay(10);
2588
2589 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2590 delay(10);
2591
2592 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2593 delay(10);
2594 #if 0
2595 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2596 #endif
2597 }
2598 }
2599
2600 /*
2601 * wm_gmii_mediainit:
2602 *
2603 * Initialize media for use on 1000BASE-T devices.
2604 */
2605 void
2606 wm_gmii_mediainit(struct wm_softc *sc)
2607 {
2608 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2609
2610 /* We have MII. */
2611 sc->sc_flags |= WM_F_HAS_MII;
2612
2613 sc->sc_tipg = TIPG_1000T_DFLT;
2614
2615 /*
2616 * Let the chip set speed/duplex on its own based on
2617 * signals from the PHY.
2618 */
2619 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2620 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2621
2622 /* Initialize our media structures and probe the GMII. */
2623 sc->sc_mii.mii_ifp = ifp;
2624
2625 if (sc->sc_type >= WM_T_CORDOVA) {
2626 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2627 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2628 } else {
2629 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2630 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2631 }
2632 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2633
2634 wm_gmii_reset(sc);
2635
2636 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2637 wm_gmii_mediastatus);
2638
2639 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2640 MII_OFFSET_ANY, 0);
2641 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2642 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2643 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2644 } else
2645 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2646 }
2647
2648 /*
2649 * wm_gmii_mediastatus: [ifmedia interface function]
2650 *
2651 * Get the current interface media status on a 1000BASE-T device.
2652 */
2653 void
2654 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2655 {
2656 struct wm_softc *sc = ifp->if_softc;
2657
2658 mii_pollstat(&sc->sc_mii);
2659 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2660 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2661 }
2662
2663 /*
2664 * wm_gmii_mediachange: [ifmedia interface function]
2665 *
2666 * Set hardware to newly-selected media on a 1000BASE-T device.
2667 */
2668 int
2669 wm_gmii_mediachange(struct ifnet *ifp)
2670 {
2671 struct wm_softc *sc = ifp->if_softc;
2672
2673 if (ifp->if_flags & IFF_UP)
2674 mii_mediachg(&sc->sc_mii);
2675 return (0);
2676 }
2677
2678 #define MDI_IO CTRL_SWDPIN(2)
2679 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2680 #define MDI_CLK CTRL_SWDPIN(3)
2681
2682 static void
2683 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2684 {
2685 uint32_t i, v;
2686
2687 v = CSR_READ(sc, WMREG_CTRL);
2688 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2689 v |= MDI_DIR | CTRL_SWDPIO(3);
2690
2691 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2692 if (data & i)
2693 v |= MDI_IO;
2694 else
2695 v &= ~MDI_IO;
2696 CSR_WRITE(sc, WMREG_CTRL, v);
2697 delay(10);
2698 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2699 delay(10);
2700 CSR_WRITE(sc, WMREG_CTRL, v);
2701 delay(10);
2702 }
2703 }
2704
2705 static uint32_t
2706 livengood_mii_recvbits(struct wm_softc *sc)
2707 {
2708 uint32_t v, i, data = 0;
2709
2710 v = CSR_READ(sc, WMREG_CTRL);
2711 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2712 v |= CTRL_SWDPIO(3);
2713
2714 CSR_WRITE(sc, WMREG_CTRL, v);
2715 delay(10);
2716 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2717 delay(10);
2718 CSR_WRITE(sc, WMREG_CTRL, v);
2719 delay(10);
2720
2721 for (i = 0; i < 16; i++) {
2722 data <<= 1;
2723 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2724 delay(10);
2725 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2726 data |= 1;
2727 CSR_WRITE(sc, WMREG_CTRL, v);
2728 delay(10);
2729 }
2730
2731 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2732 delay(10);
2733 CSR_WRITE(sc, WMREG_CTRL, v);
2734 delay(10);
2735
2736 return (data);
2737 }
2738
2739 #undef MDI_IO
2740 #undef MDI_DIR
2741 #undef MDI_CLK
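
/*
 * For reference, the bit-banged management frames assembled below use
 * the usual IEEE 802.3 clause 22 layout (a sketch, assuming the
 * standard MII_COMMAND_* encodings): 32 preamble one-bits, then
 *
 *	<start:2><opcode:2><phy addr:5><reg addr:5><turnaround:2><data:16>
 *
 * A read shifts out only the 14 bits through the register address and
 * then turns the bus around to sample the 16 data bits.
 */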
2742
2743 /*
2744 * wm_gmii_livengood_readreg: [mii interface function]
2745 *
2746 * Read a PHY register on the GMII (Livengood version).
2747 */
2748 int
2749 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2750 {
2751 struct wm_softc *sc = (void *) self;
2752 int rv;
2753
2754 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2755 livengood_mii_sendbits(sc, reg | (phy << 5) |
2756 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2757 rv = livengood_mii_recvbits(sc) & 0xffff;
2758
2759 DPRINTF(WM_DEBUG_GMII,
2760 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2761 sc->sc_dev.dv_xname, phy, reg, rv));
2762
2763 return (rv);
2764 }
2765
2766 /*
2767 * wm_gmii_livengood_writereg: [mii interface function]
2768 *
2769 * Write a PHY register on the GMII (Livengood version).
2770 */
2771 void
2772 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2773 {
2774 struct wm_softc *sc = (void *) self;
2775
2776 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2777 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2778 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2779 (MII_COMMAND_START << 30), 32);
2780 }
2781
2782 /*
2783 * wm_gmii_cordova_readreg: [mii interface function]
2784 *
2785  *	Read a PHY register on the GMII (Cordova version).
2786 */
2787 int
2788 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2789 {
2790 struct wm_softc *sc = (void *) self;
2791 uint32_t mdic;
2792 int i, rv;
2793
2794 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2795 MDIC_REGADD(reg));
2796
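	/*
	 * Poll for completion; the MDIC cycle normally finishes well
	 * inside this 1 ms (100 * 10 us) bound.
	 */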
2797 for (i = 0; i < 100; i++) {
2798 mdic = CSR_READ(sc, WMREG_MDIC);
2799 if (mdic & MDIC_READY)
2800 break;
2801 delay(10);
2802 }
2803
2804 if ((mdic & MDIC_READY) == 0) {
2805 printf("%s: MDIC read timed out: phy %d reg %d\n",
2806 sc->sc_dev.dv_xname, phy, reg);
2807 rv = 0;
2808 } else if (mdic & MDIC_E) {
2809 #if 0 /* This is normal if no PHY is present. */
2810 printf("%s: MDIC read error: phy %d reg %d\n",
2811 sc->sc_dev.dv_xname, phy, reg);
2812 #endif
2813 rv = 0;
2814 } else {
2815 rv = MDIC_DATA(mdic);
2816 if (rv == 0xffff)
2817 rv = 0;
2818 }
2819
2820 return (rv);
2821 }
2822
2823 /*
2824 * wm_gmii_cordova_writereg: [mii interface function]
2825 *
2826  *	Write a PHY register on the GMII (Cordova version).
2827 */
2828 void
2829 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2830 {
2831 struct wm_softc *sc = (void *) self;
2832 uint32_t mdic;
2833 int i;
2834
2835 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2836 MDIC_REGADD(reg) | MDIC_DATA(val));
2837
2838 for (i = 0; i < 100; i++) {
2839 mdic = CSR_READ(sc, WMREG_MDIC);
2840 if (mdic & MDIC_READY)
2841 break;
2842 delay(10);
2843 }
2844
2845 if ((mdic & MDIC_READY) == 0)
2846 printf("%s: MDIC write timed out: phy %d reg %d\n",
2847 sc->sc_dev.dv_xname, phy, reg);
2848 else if (mdic & MDIC_E)
2849 printf("%s: MDIC write error: phy %d reg %d\n",
2850 sc->sc_dev.dv_xname, phy, reg);
2851 }
2852
2853 /*
2854 * wm_gmii_statchg: [mii interface function]
2855 *
2856 * Callback from MII layer when media changes.
2857 */
2858 void
2859 wm_gmii_statchg(struct device *self)
2860 {
2861 struct wm_softc *sc = (void *) self;
2862
2863 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2864
2865 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2866 DPRINTF(WM_DEBUG_LINK,
2867 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2868 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2869 } else {
2870 DPRINTF(WM_DEBUG_LINK,
2871 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2872 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2873 }
2874
2875 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2876 }
2877