/*	$NetBSD: if_wm.c,v 1.10 2002/07/09 14:52:37 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
 * and i82544 (``Cordova'') Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *
 *	- Make GMII work on the Livengood.
 *
 *	- Fix out-bound IP header checksums.
 *
 *	- Fix UDP checksums.
 *
 *	- Jumbo frames -- requires changes to network stack due to
 *	  lame buffer length handling on chip.
 *
 * ...and, of course, performance tuning.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 64 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		64
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_TXQUEUE_GC		(WM_TXQUEUELEN / 8)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
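
/*
 * Editorial note: WM_NTXDESC and WM_TXQUEUELEN are powers of two, so the
 * masks above implement ring wrap-around without a modulo operation;
 * e.g. WM_NEXTTX(255) == 0 and WM_NEXTTXS(63) == 0.
 */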

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
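
/*
 * Usage sketch (editorial): wm_rxintr() below calls WM_RXCHAIN_LINK()
 * once per buffer of a multi-buffer packet, NUL-terminates the chain
 * via sc_rxtailp when the EOP descriptor arrives, and then calls
 * WM_RXCHAIN_RESET() to start the next packet's chain.
 */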

/* sc_type */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC) {					\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC - __x), (ops));				\
		__n -= (WM_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
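
#if 0	/* editorial example; illustrative only, never compiled */
	/*
	 * With WM_NTXDESC == 256, syncing 4 descriptors starting at
	 * index 254 wraps the ring, so the macro issues two
	 * bus_dmamap_sync() calls: one for descriptors 254-255 and
	 * one for descriptors 0-1.
	 */
	WM_CDTXSYNC(sc, 254, 4, BUS_DMASYNC_PREWRITE);
#endif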

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't accept packets longer than the standard	\
	 * Ethernet MTU, without incurring a big penalty to copy every	\
	 * incoming packet to a new, suitably aligned buffer.		\
	 *								\
	 * We'll need to make some changes to the layer 3/4 parts of	\
	 * the stack (to copy the headers to a new buffer if not	\
	 * aligned) in order to support large MTU on this chip.  Lame.	\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + 2;				\
									\
	__rxd->wrx_addr.wa_low =					\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2);		\
	__rxd->wrx_addr.wa_high = 0;					\
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
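
/*
 * Editorial note: with ETHER_HDR_LEN == 14, the 2-byte scoot above puts
 * the IP header at offset 16 in the buffer, so the layer 3 header falls
 * on the 4-byte boundary the comment describes.
 */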

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	int			wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD,	WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA,		WMP_F_1000T },

	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
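	/*
	 * Editorial example: the EEPROM stores the address as three
	 * little-endian 16-bit words, so a (hypothetical) myea[0] of
	 * 0x2300 yields enaddr[0] == 0x00 and enaddr[1] == 0x23.
	 */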

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txctx miss");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
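 *
 *	Worked example (editorial): with standard Ethernet encapsulation
 *	and a 20-byte IP header, IPCSS is 14, IPCSO is 24 (14 +
 *	offsetof(struct ip, ip_sum)), IPCSE is 33 (14 + 20 - 1), and the
 *	TCP/UDP checksum region starts at TUCSS == 34.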
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		WM_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef WM_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			WM_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			WM_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct livengood_tcpip_ctxdesc *)
		    &sc->sc_txdescs[sc->sc_txnext];
		t->tcpip_ipcs = ipcs;
		t->tcpip_tucs = tucs;
		t->tcpip_cmdlen =
		    htole32(WTX_CMD_DEXT | WTX_DTYP_C);
		t->tcpip_seg = 0;
		WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*cmdp = WTX_CMD_DEXT | WTX_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < WM_TXQUEUE_GC) {
			wm_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(WM_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    sc->sc_dev.dv_xname));
				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
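		 *
		 * (Editorial example: a 16-segment packet therefore
		 * needs at least 18 free descriptors before we commit.)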
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This may be
		 * incremented by 1 if we do checksum offload (a descriptor
		 * is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, txs, &cksumcmd,
			    &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				txs->txs_mbuf = NULL;
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		cksumcmd |= htole32(WTX_CMD_IDE);

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		wm_rxintr(sc);

#if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
1379 ("%s: TX: got TDXW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txdw);
		}
#endif
		wm_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
1426 */
1427 for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
1428 i = WM_NEXTTXS(i), sc->sc_txsfree++) {
1429 txs = &sc->sc_txsoft[i];
1430
1431 DPRINTF(WM_DEBUG_TX,
1432 ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));
1433
1434 WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
1435 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1436
1437 status = le32toh(sc->sc_txdescs[
1438 txs->txs_lastdesc].wtx_fields.wtxu_bits);
1439 if ((status & WTX_ST_DD) == 0)
1440 break;
1441
1442 DPRINTF(WM_DEBUG_TX,
1443 ("%s: TX: job %d done: descs %d..%d\n",
1444 sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
1445 txs->txs_lastdesc));
1446
1447 /*
1448 * XXX We should probably be using the statistics
1449 * XXX registers, but I don't know if they exist
1450 * XXX on chips before the Cordova.
1451 */
1452
1453 #ifdef WM_EVENT_COUNTERS
1454 if (status & WTX_ST_TU)
1455 WM_EVCNT_INCR(&sc->sc_ev_tu);
1456 #endif /* WM_EVENT_COUNTERS */
1457
1458 if (status & (WTX_ST_EC|WTX_ST_LC)) {
1459 ifp->if_oerrors++;
1460 if (status & WTX_ST_LC)
1461 printf("%s: late collision\n",
1462 sc->sc_dev.dv_xname);
1463 else if (status & WTX_ST_EC) {
1464 ifp->if_collisions += 16;
1465 printf("%s: excessive collisions\n",
1466 sc->sc_dev.dv_xname);
1467 }
1468 } else
1469 ifp->if_opackets++;
1470
1471 sc->sc_txfree += txs->txs_ndesc;
1472 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
1473 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1474 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
1475 m_freem(txs->txs_mbuf);
1476 txs->txs_mbuf = NULL;
1477 }
1478
1479 /* Update the dirty transmit buffer pointer. */
1480 sc->sc_txsdirty = i;
1481 DPRINTF(WM_DEBUG_TX,
1482 ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));
1483
1484 /*
1485 * If there are no more pending transmissions, cancel the watchdog
1486 * timer.
1487 */
1488 if (sc->sc_txsfree == WM_TXQUEUELEN)
1489 ifp->if_timer = 0;
1490 }
1491
1492 /*
1493 * wm_rxintr:
1494 *
1495 * Helper; handle receive interrupts.
1496 */
1497 void
1498 wm_rxintr(struct wm_softc *sc)
1499 {
1500 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1501 struct wm_rxsoft *rxs;
1502 struct mbuf *m;
1503 int i, len;
1504 uint8_t status, errors;
1505
1506 for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
1507 rxs = &sc->sc_rxsoft[i];
1508
1509 DPRINTF(WM_DEBUG_RX,
1510 ("%s: RX: checking descriptor %d\n",
1511 sc->sc_dev.dv_xname, i));
1512
1513 WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1514
1515 status = sc->sc_rxdescs[i].wrx_status;
1516 errors = sc->sc_rxdescs[i].wrx_errors;
1517 len = le16toh(sc->sc_rxdescs[i].wrx_len);
1518
1519 if ((status & WRX_ST_DD) == 0) {
1520 /*
1521 * We have processed all of the receive descriptors.
1522 */
1523 break;
1524 }
1525
1526 if (__predict_false(sc->sc_rxdiscard)) {
1527 DPRINTF(WM_DEBUG_RX,
1528 ("%s: RX: discarding contents of descriptor %d\n",
1529 sc->sc_dev.dv_xname, i));
1530 WM_INIT_RXDESC(sc, i);
1531 if (status & WRX_ST_EOP) {
1532 /* Reset our state. */
1533 DPRINTF(WM_DEBUG_RX,
1534 ("%s: RX: resetting rxdiscard -> 0\n",
1535 sc->sc_dev.dv_xname));
1536 sc->sc_rxdiscard = 0;
1537 }
1538 continue;
1539 }
1540
1541 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1542 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
1543
1544 m = rxs->rxs_mbuf;
1545
1546 /*
1547 * Add a new receive buffer to the ring.
1548 */
1549 if (wm_add_rxbuf(sc, i) != 0) {
1550 /*
1551 * Failed, throw away what we've done so
1552 * far, and discard the rest of the packet.
1553 */
1554 ifp->if_ierrors++;
1555 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
1556 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1557 WM_INIT_RXDESC(sc, i);
1558 if ((status & WRX_ST_EOP) == 0)
1559 sc->sc_rxdiscard = 1;
1560 if (sc->sc_rxhead != NULL)
1561 m_freem(sc->sc_rxhead);
1562 WM_RXCHAIN_RESET(sc);
1563 DPRINTF(WM_DEBUG_RX,
1564 ("%s: RX: Rx buffer allocation failed, "
1565 "dropping packet%s\n", sc->sc_dev.dv_xname,
1566 sc->sc_rxdiscard ? " (discard)" : ""));
1567 continue;
1568 }
1569
1570 WM_RXCHAIN_LINK(sc, m);
1571
1572 m->m_len = len;
1573
1574 DPRINTF(WM_DEBUG_RX,
1575 ("%s: RX: buffer at %p len %d\n",
1576 sc->sc_dev.dv_xname, m->m_data, len));
1577
1578 /*
1579 * If this is not the end of the packet, keep
1580 * looking.
1581 */
1582 if ((status & WRX_ST_EOP) == 0) {
1583 sc->sc_rxlen += len;
1584 DPRINTF(WM_DEBUG_RX,
1585 ("%s: RX: not yet EOP, rxlen -> %d\n",
1586 sc->sc_dev.dv_xname, sc->sc_rxlen));
1587 continue;
1588 }
1589
1590 /*
1591 * Okay, we have the entire packet now...
1592 */
1593 *sc->sc_rxtailp = NULL;
1594 m = sc->sc_rxhead;
1595 len += sc->sc_rxlen;
1596
1597 WM_RXCHAIN_RESET(sc);
1598
1599 DPRINTF(WM_DEBUG_RX,
1600 ("%s: RX: have entire packet, len -> %d\n",
1601 sc->sc_dev.dv_xname, len));
1602
1603 /*
1604 * If an error occurred, update stats and drop the packet.
1605 */
1606 if (errors &
1607 (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
1608 ifp->if_ierrors++;
1609 if (errors & WRX_ER_SE)
1610 printf("%s: symbol error\n",
1611 sc->sc_dev.dv_xname);
1612 else if (errors & WRX_ER_SEQ)
1613 printf("%s: receive sequence error\n",
1614 sc->sc_dev.dv_xname);
1615 else if (errors & WRX_ER_CE)
1616 printf("%s: CRC error\n",
1617 sc->sc_dev.dv_xname);
1618 m_freem(m);
1619 continue;
1620 }
1621
1622 /*
1623 * No errors. Receive the packet.
1624 *
1625 * Note, we have configured the chip to include the
1626 * CRC with every packet.
1627 */
1628 m->m_flags |= M_HASFCS;
1629 m->m_pkthdr.rcvif = ifp;
1630 m->m_pkthdr.len = len;
1631
1632 #if 0 /* XXXJRT */
1633 /*
1634 * If VLANs are enabled, VLAN packets have been unwrapped
1635 * for us. Associate the tag with the packet.
1636 */
1637 if (sc->sc_ethercom.ec_nvlans != 0 &&
1638 (status & WRX_ST_VP) != 0) {
1639 struct mbuf *vtag;
1640
1641 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
1642 if (vtag == NULL) {
1643 ifp->if_ierrors++;
1644 printf("%s: unable to allocate VLAN tag\n",
1645 sc->sc_dev.dv_xname);
1646 m_freem(m);
1647 continue;
1648 }
1649
1650 *mtod(m, int *) =
1651 le16toh(sc->sc_rxdescs[i].wrx_special);
1652 vtag->m_len = sizeof(int);
1653 }
1654 #endif /* XXXJRT */
1655
1656 /*
1657 * Set up checksum info for this packet.
1658 */
1659 if (status & WRX_ST_IPCS) {
1660 WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
1661 m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
1662 if (errors & WRX_ER_IPE)
1663 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
1664 }
1665 if (status & WRX_ST_TCPCS) {
1666 /*
1667 * Note: we don't know if this was TCP or UDP,
1668 * so we just set both bits, and expect the
1669 * upper layers to deal.
1670 */
1671 WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
1672 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
1673 if (errors & WRX_ER_TCPE)
1674 m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
1675 }
1676
1677 ifp->if_ipackets++;
1678
1679 #if NBPFILTER > 0
1680 /* Pass this up to any BPF listeners. */
1681 if (ifp->if_bpf)
1682 bpf_mtap(ifp->if_bpf, m);
1683 #endif /* NBPFILTER > 0 */
1684
1685 /* Pass it on. */
1686 (*ifp->if_input)(ifp, m);
1687 }
1688
1689 /* Update the receive pointer. */
1690 sc->sc_rxptr = i;
1691
1692 DPRINTF(WM_DEBUG_RX,
1693 ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
1694 }
1695
1696 /*
1697 * wm_linkintr:
1698 *
1699 * Helper; handle link interrupts.
1700 */
1701 void
1702 wm_linkintr(struct wm_softc *sc, uint32_t icr)
1703 {
1704 uint32_t status;
1705
1706 /*
1707 * If we get a link status interrupt on a 1000BASE-T
1708 * device, just fall into the normal MII tick path.
1709 */
1710 if (sc->sc_flags & WM_F_HAS_MII) {
1711 if (icr & ICR_LSC) {
1712 DPRINTF(WM_DEBUG_LINK,
1713 ("%s: LINK: LSC -> mii_tick\n",
1714 sc->sc_dev.dv_xname));
1715 mii_tick(&sc->sc_mii);
1716 } else if (icr & ICR_RXSEQ) {
1717 DPRINTF(WM_DEBUG_LINK,
1718 ("%s: LINK Receive sequence error\n",
1719 sc->sc_dev.dv_xname));
1720 }
1721 return;
1722 }
1723
1724 /*
1725 * If we are now receiving /C/, check for link again in
1726 * a couple of link clock ticks.
1727 */
1728 if (icr & ICR_RXCFG) {
1729 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
1730 sc->sc_dev.dv_xname));
1731 sc->sc_tbi_anstate = 2;
1732 }
1733
1734 if (icr & ICR_LSC) {
1735 status = CSR_READ(sc, WMREG_STATUS);
1736 if (status & STATUS_LU) {
1737 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
1738 sc->sc_dev.dv_xname,
1739 (status & STATUS_FD) ? "FDX" : "HDX"));
1740 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
1741 if (status & STATUS_FD)
1742 sc->sc_tctl |=
1743 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1744 else
1745 sc->sc_tctl |=
1746 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
1747 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1748 sc->sc_tbi_linkup = 1;
1749 } else {
1750 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
1751 sc->sc_dev.dv_xname));
1752 sc->sc_tbi_linkup = 0;
1753 }
1754 sc->sc_tbi_anstate = 2;
1755 wm_tbi_set_linkled(sc);
1756 } else if (icr & ICR_RXSEQ) {
1757 DPRINTF(WM_DEBUG_LINK,
1758 ("%s: LINK: Receive sequence error\n",
1759 sc->sc_dev.dv_xname));
1760 }
1761 }
1762
1763 /*
1764 * wm_tick:
1765 *
1766 * One second timer, used to check link status, sweep up
1767 * completed transmit jobs, etc.
1768 */
1769 void
1770 wm_tick(void *arg)
1771 {
1772 struct wm_softc *sc = arg;
1773 int s;
1774
1775 s = splnet();
1776
1777 if (sc->sc_flags & WM_F_HAS_MII)
1778 mii_tick(&sc->sc_mii);
1779 else
1780 wm_tbi_check_link(sc);
1781
1782 splx(s);
1783
1784 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1785 }
1786
1787 /*
1788 * wm_reset:
1789 *
1790 * Reset the i82542 chip.
1791 */
1792 void
1793 wm_reset(struct wm_softc *sc)
1794 {
1795 int i;
1796
1797 CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
1798 delay(10000);
1799
1800 for (i = 0; i < 1000; i++) {
1801 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
1802 return;
1803 delay(20);
1804 }
1805
1806 if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
1807 printf("%s: WARNING: reset failed to complete\n",
1808 sc->sc_dev.dv_xname);
1809 }
1810
1811 /*
1812 * wm_init: [ifnet interface function]
1813 *
1814 * Initialize the interface. Must be called at splnet().
1815 */
1816 int
1817 wm_init(struct ifnet *ifp)
1818 {
1819 struct wm_softc *sc = ifp->if_softc;
1820 struct wm_rxsoft *rxs;
1821 int i, error = 0;
1822 uint32_t reg;
1823
1824 /* Cancel any pending I/O. */
1825 wm_stop(ifp, 0);
1826
1827 /* Reset the chip to a known state. */
1828 wm_reset(sc);
1829
1830 /* Initialize the transmit descriptor ring. */
1831 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
1832 WM_CDTXSYNC(sc, 0, WM_NTXDESC,
1833 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1834 sc->sc_txfree = WM_NTXDESC;
1835 sc->sc_txnext = 0;
1836
1837 sc->sc_txctx_ipcs = 0xffffffff;
1838 sc->sc_txctx_tucs = 0xffffffff;
1839
1840 if (sc->sc_type < WM_T_LIVENGOOD) {
1841 CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
1842 CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
1843 CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
1844 CSR_WRITE(sc, WMREG_OLD_TDH, 0);
1845 CSR_WRITE(sc, WMREG_OLD_TDT, 0);
1846 CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
1847 } else {
1848 CSR_WRITE(sc, WMREG_TBDAH, 0);
1849 CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
1850 CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
1851 CSR_WRITE(sc, WMREG_TDH, 0);
1852 CSR_WRITE(sc, WMREG_TDT, 0);
1853 CSR_WRITE(sc, WMREG_TIDV, 128);
1854
1855 CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
1856 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
1857 CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
1858 RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
1859 }
1860 CSR_WRITE(sc, WMREG_TQSA_LO, 0);
1861 CSR_WRITE(sc, WMREG_TQSA_HI, 0);
1862
1863 /* Initialize the transmit job descriptors. */
1864 for (i = 0; i < WM_TXQUEUELEN; i++)
1865 sc->sc_txsoft[i].txs_mbuf = NULL;
1866 sc->sc_txsfree = WM_TXQUEUELEN;
1867 sc->sc_txsnext = 0;
1868 sc->sc_txsdirty = 0;
1869
1870 /*
1871 * Initialize the receive descriptor and receive job
1872 * descriptor rings.
1873 */
1874 if (sc->sc_type < WM_T_LIVENGOOD) {
1875 CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
1876 CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
1877 CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
1878 CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
1879 CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
1880 CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
1881
1882 CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
1883 CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
1884 CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
1885 CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
1886 CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
1887 CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
1888 } else {
1889 CSR_WRITE(sc, WMREG_RDBAH, 0);
1890 CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
1891 CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
1892 CSR_WRITE(sc, WMREG_RDH, 0);
1893 CSR_WRITE(sc, WMREG_RDT, 0);
1894 CSR_WRITE(sc, WMREG_RDTR, 28 | RDTR_FPD);
1895 }
1896 for (i = 0; i < WM_NRXDESC; i++) {
1897 rxs = &sc->sc_rxsoft[i];
1898 if (rxs->rxs_mbuf == NULL) {
1899 if ((error = wm_add_rxbuf(sc, i)) != 0) {
1900 printf("%s: unable to allocate or map rx "
1901 "buffer %d, error = %d\n",
1902 sc->sc_dev.dv_xname, i, error);
1903 /*
1904 * XXX Should attempt to run with fewer receive
1905 * XXX buffers instead of just failing.
1906 */
1907 wm_rxdrain(sc);
1908 goto out;
1909 }
1910 } else
1911 WM_INIT_RXDESC(sc, i);
1912 }
1913 sc->sc_rxptr = 0;
1914 sc->sc_rxdiscard = 0;
1915 WM_RXCHAIN_RESET(sc);
1916
1917 /*
1918 * Clear out the VLAN table -- we don't use it (yet).
1919 */
1920 CSR_WRITE(sc, WMREG_VET, 0);
1921 for (i = 0; i < WM_VLAN_TABSIZE; i++)
1922 CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
1923
1924 /*
1925 * Set up flow-control parameters.
1926 *
1927 * XXX Values could probably stand some tuning.
1928 */
1929 if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
1930 CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
1931 CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
1932 CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
1933
1934 if (sc->sc_type < WM_T_LIVENGOOD) {
1935 CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
1936 CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
1937 } else {
1938 CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
1939 CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
1940 }
1941 CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
1942 }
1943
1944 #if 0 /* XXXJRT */
1945 /* Deal with VLAN enables. */
1946 if (sc->sc_ethercom.ec_nvlans != 0)
1947 sc->sc_ctrl |= CTRL_VME;
1948 else
1949 #endif /* XXXJRT */
1950 sc->sc_ctrl &= ~CTRL_VME;
1951
1952 /* Write the control registers. */
1953 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1954 #if 0
1955 CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1956 #endif
1957
1958 /*
1959 * Set up checksum offload parameters.
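	 *
	 * RXCSUM_IPOFL has the chip verify IPv4 header checksums on
	 * receive, and RXCSUM_TUOFL the TCP and UDP checksums; the
	 * results come back in the status field of each receive
	 * descriptor.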
1960 */
1961 reg = CSR_READ(sc, WMREG_RXCSUM);
1962 if (ifp->if_capenable & IFCAP_CSUM_IPv4)
1963 reg |= RXCSUM_IPOFL;
1964 else
1965 reg &= ~RXCSUM_IPOFL;
1966 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
1967 reg |= RXCSUM_TUOFL;
1968 else
1969 reg &= ~RXCSUM_TUOFL;
1970 CSR_WRITE(sc, WMREG_RXCSUM, reg);
1971
1972 /*
1973 * Set up the interrupt registers.
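	 *
	 * Writing all-ones to IMC first masks every interrupt source;
	 * IMS then enables just the causes we handle: transmit
	 * descriptor writeback, link status change, receive sequence
	 * error, receive descriptor minimum threshold, receiver
	 * overrun, and the receive timer.  On TBI (no-MII) parts,
	 * ICR_RXCFG is added so the receipt of configuration ordered
	 * sets on the link is noticed as well.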
1974 */
1975 CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
1976 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
1977 ICR_RXO | ICR_RXT0;
1978 if ((sc->sc_flags & WM_F_HAS_MII) == 0)
1979 sc->sc_icr |= ICR_RXCFG;
1980 CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
1981
1982 /* Set up the inter-packet gap. */
1983 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
1984
1985 #if 0 /* XXXJRT */
1986 /* Set the VLAN ethernetype. */
1987 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
1988 #endif
1989
1990 /*
1991 * Set up the transmit control register; we start out with
1992 	 * a collision distance suitable for FDX, but update it when
1993 * we resolve the media type.
1994 */
1995 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
1996 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1997 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1998
1999 /* Set the media. */
2000 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
2001
2002 /*
2003 * Set up the receive control register; we actually program
2004 * the register when we set the receive filter. Use multicast
2005 * address offset type 0.
2006 *
2007 * Only the Cordova has the ability to strip the incoming
2008 * CRC, so we don't enable that feature.
2009 */
2010 sc->sc_mchash_type = 0;
2011 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
2012 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
2013
2014 /* Set the receive filter. */
2015 wm_set_filter(sc);
2016
2017 /* Start the one second link check clock. */
2018 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2019
2020 /* ...all done! */
2021 ifp->if_flags |= IFF_RUNNING;
2022 ifp->if_flags &= ~IFF_OACTIVE;
2023
2024 out:
2025 if (error)
2026 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
2027 return (error);
2028 }
2029
2030 /*
2031 * wm_rxdrain:
2032 *
2033 * Drain the receive queue.
2034 */
2035 void
2036 wm_rxdrain(struct wm_softc *sc)
2037 {
2038 struct wm_rxsoft *rxs;
2039 int i;
2040
2041 for (i = 0; i < WM_NRXDESC; i++) {
2042 rxs = &sc->sc_rxsoft[i];
2043 if (rxs->rxs_mbuf != NULL) {
2044 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2045 m_freem(rxs->rxs_mbuf);
2046 rxs->rxs_mbuf = NULL;
2047 }
2048 }
2049 }
2050
2051 /*
2052 * wm_stop: [ifnet interface function]
2053 *
2054 * Stop transmission on the interface.
2055 */
2056 void
2057 wm_stop(struct ifnet *ifp, int disable)
2058 {
2059 struct wm_softc *sc = ifp->if_softc;
2060 struct wm_txsoft *txs;
2061 int i;
2062
2063 /* Stop the one second clock. */
2064 callout_stop(&sc->sc_tick_ch);
2065
2066 if (sc->sc_flags & WM_F_HAS_MII) {
2067 /* Down the MII. */
2068 mii_down(&sc->sc_mii);
2069 }
2070
2071 /* Stop the transmit and receive processes. */
2072 CSR_WRITE(sc, WMREG_TCTL, 0);
2073 CSR_WRITE(sc, WMREG_RCTL, 0);
2074
2075 /* Release any queued transmit buffers. */
2076 for (i = 0; i < WM_TXQUEUELEN; i++) {
2077 txs = &sc->sc_txsoft[i];
2078 if (txs->txs_mbuf != NULL) {
2079 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2080 m_freem(txs->txs_mbuf);
2081 txs->txs_mbuf = NULL;
2082 }
2083 }
2084
2085 if (disable)
2086 wm_rxdrain(sc);
2087
2088 /* Mark the interface as down and cancel the watchdog timer. */
2089 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2090 ifp->if_timer = 0;
2091 }
2092
2093 /*
2094 * wm_read_eeprom:
2095 *
2096 * Read data from the serial EEPROM.
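 *
 * The EEPROM is a Microwire-style serial part, accessed by
 * bit-banging the EECD register: for each bit, EECD_DI is set up,
 * EECD_SK is pulsed high and low with ~2us delays, and output data
 * is sampled on EECD_DO.  A READ cycle is a 3-bit opcode, a 6-bit
 * word address, and 16 data bits, all shifted MSB first.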
2097 */
2098 void
2099 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2100 {
2101 uint32_t reg;
2102 int i, x;
2103
2104 for (i = 0; i < wordcnt; i++) {
2105 /* Send CHIP SELECT for one clock tick. */
2106 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2107 delay(2);
2108
2109 /* Shift in the READ command. */
2110 for (x = 3; x > 0; x--) {
2111 reg = EECD_CS;
2112 if (UWIRE_OPC_READ & (1 << (x - 1)))
2113 reg |= EECD_DI;
2114 CSR_WRITE(sc, WMREG_EECD, reg);
2115 delay(2);
2116 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2117 delay(2);
2118 CSR_WRITE(sc, WMREG_EECD, reg);
2119 delay(2);
2120 }
2121
2122 /* Shift in address. */
2123 for (x = 6; x > 0; x--) {
2124 reg = EECD_CS;
2125 if ((word + i) & (1 << (x - 1)))
2126 reg |= EECD_DI;
2127 CSR_WRITE(sc, WMREG_EECD, reg);
2128 delay(2);
2129 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2130 delay(2);
2131 CSR_WRITE(sc, WMREG_EECD, reg);
2132 delay(2);
2133 }
2134
2135 /* Shift out the data. */
2136 reg = EECD_CS;
2137 data[i] = 0;
2138 for (x = 16; x > 0; x--) {
2139 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2140 delay(2);
2141 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2142 data[i] |= (1 << (x - 1));
2143 CSR_WRITE(sc, WMREG_EECD, reg);
2144 delay(2);
2145 }
2146
2147 /* Clear CHIP SELECT. */
2148 CSR_WRITE(sc, WMREG_EECD, 0);
2149 }
2150 }
2151
2152 /*
2153 * wm_add_rxbuf:
2154 *
2155  * Add a receive buffer to the indicated descriptor.
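 *
 * A new mbuf cluster is allocated and loaded into the slot's DMA
 * map.  If allocation fails, ENOBUFS is returned and any mbuf
 * already in the slot is left untouched; a DMA load failure is
 * currently treated as fatal (see the XXX panic below).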
2156 */
2157 int
2158 wm_add_rxbuf(struct wm_softc *sc, int idx)
2159 {
2160 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2161 struct mbuf *m;
2162 int error;
2163
2164 MGETHDR(m, M_DONTWAIT, MT_DATA);
2165 if (m == NULL)
2166 return (ENOBUFS);
2167
2168 MCLGET(m, M_DONTWAIT);
2169 if ((m->m_flags & M_EXT) == 0) {
2170 m_freem(m);
2171 return (ENOBUFS);
2172 }
2173
2174 if (rxs->rxs_mbuf != NULL)
2175 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2176
2177 rxs->rxs_mbuf = m;
2178
2179 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2180 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2181 BUS_DMA_READ|BUS_DMA_NOWAIT);
2182 if (error) {
2183 printf("%s: unable to load rx DMA map %d, error = %d\n",
2184 sc->sc_dev.dv_xname, idx, error);
2185 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2186 }
2187
2188 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2189 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2190
2191 WM_INIT_RXDESC(sc, idx);
2192
2193 return (0);
2194 }
2195
2196 /*
2197 * wm_set_ral:
2198 *
2199  * Set an entry in the receive address list.
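 *
 * Each entry is a register pair: the low word holds the first four
 * octets of the address (first octet in the least significant
 * byte) and the high word the last two octets plus RAL_AV, which
 * marks the entry valid.  A NULL address clears the slot.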
2200 */
2201 static void
2202 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2203 {
2204 uint32_t ral_lo, ral_hi;
2205
2206 if (enaddr != NULL) {
2207 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2208 (enaddr[3] << 24);
2209 ral_hi = enaddr[4] | (enaddr[5] << 8);
2210 ral_hi |= RAL_AV;
2211 } else {
2212 ral_lo = 0;
2213 ral_hi = 0;
2214 }
2215
2216 if (sc->sc_type >= WM_T_CORDOVA) {
2217 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2218 ral_lo);
2219 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2220 ral_hi);
2221 } else {
2222 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2223 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2224 }
2225 }
2226
2227 /*
2228 * wm_mchash:
2229 *
2230 * Compute the hash of the multicast address for the 4096-bit
2231 * multicast filter.
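 *
 * The hash is built from the last two octets of the address, with
 * the shift amounts selected by sc_mchash_type.  For example, with
 * filter type 0, an address ending in octets 0x12, 0x34 hashes to
 * (0x12 >> 4) | (0x34 << 4) = 0x341.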
2232 */
2233 static uint32_t
2234 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2235 {
2236 static const int lo_shift[4] = { 4, 3, 2, 0 };
2237 static const int hi_shift[4] = { 4, 5, 6, 8 };
2238 uint32_t hash;
2239
2240 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2241 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2242
2243 return (hash & 0xfff);
2244 }
2245
2246 /*
2247 * wm_set_filter:
2248 *
2249 * Set up the receive filter.
2250 */
2251 void
2252 wm_set_filter(struct wm_softc *sc)
2253 {
2254 struct ethercom *ec = &sc->sc_ethercom;
2255 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2256 struct ether_multi *enm;
2257 struct ether_multistep step;
2258 bus_addr_t mta_reg;
2259 uint32_t hash, reg, bit;
2260 int i;
2261
2262 if (sc->sc_type >= WM_T_CORDOVA)
2263 mta_reg = WMREG_CORDOVA_MTA;
2264 else
2265 mta_reg = WMREG_MTA;
2266
2267 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2268
2269 if (ifp->if_flags & IFF_BROADCAST)
2270 sc->sc_rctl |= RCTL_BAM;
2271 if (ifp->if_flags & IFF_PROMISC) {
2272 sc->sc_rctl |= RCTL_UPE;
2273 goto allmulti;
2274 }
2275
2276 /*
2277 * Set the station address in the first RAL slot, and
2278 * clear the remaining slots.
2279 */
2280 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2281 for (i = 1; i < WM_RAL_TABSIZE; i++)
2282 wm_set_ral(sc, NULL, i);
2283
2284 /* Clear out the multicast table. */
2285 for (i = 0; i < WM_MC_TABSIZE; i++)
2286 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2287
2288 ETHER_FIRST_MULTI(step, ec, enm);
2289 while (enm != NULL) {
2290 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2291 /*
2292 * We must listen to a range of multicast addresses.
2293 * For now, just accept all multicasts, rather than
2294 * trying to set only those filter bits needed to match
2295 * the range. (At this time, the only use of address
2296 * ranges is for IP multicast routing, for which the
2297 * range is big enough to require all bits set.)
2298 */
2299 goto allmulti;
2300 }
2301
2302 hash = wm_mchash(sc, enm->enm_addrlo);
2303
2304 reg = (hash >> 5) & 0x7f;
2305 bit = hash & 0x1f;
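		/*
		 * The 4096-bit table is an array of 128 32-bit MTA
		 * registers: the upper seven bits of the hash select the
		 * register and the low five bits the bit within it
		 * (e.g. hash 0x341 -> register 26, bit 1).
		 */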
2306
2307 hash = CSR_READ(sc, mta_reg + (reg << 2));
2308 hash |= 1U << bit;
2309
2310 		/*
		 * XXX Hardware bug?  Writing an odd-numbered MTA register
		 * XXX can clobber its even-numbered neighbor, so re-write
		 * XXX the neighbor afterwards.  The original test here,
		 * XXX `(reg & 0xe) == 1', could never be true; an odd
		 * XXX register number is assumed to be what was meant.
		 */
2311 		if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2312 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2313 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2314 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2315 } else
2316 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2317
2318 ETHER_NEXT_MULTI(step, enm);
2319 }
2320
2321 ifp->if_flags &= ~IFF_ALLMULTI;
2322 goto setit;
2323
2324 allmulti:
2325 ifp->if_flags |= IFF_ALLMULTI;
2326 sc->sc_rctl |= RCTL_MPE;
2327
2328 setit:
2329 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2330 }
2331
2332 /*
2333 * wm_tbi_mediainit:
2334 *
2335 * Initialize media for use on 1000BASE-X devices.
2336 */
2337 void
2338 wm_tbi_mediainit(struct wm_softc *sc)
2339 {
2340 const char *sep = "";
2341
2342 if (sc->sc_type < WM_T_LIVENGOOD)
2343 sc->sc_tipg = TIPG_WM_DFLT;
2344 else
2345 sc->sc_tipg = TIPG_LG_DFLT;
2346
2347 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2348 wm_tbi_mediastatus);
2349
2350 /*
2351 * SWD Pins:
2352 *
2353 * 0 = Link LED (output)
2354 * 1 = Loss Of Signal (input)
2355 */
2356 sc->sc_ctrl |= CTRL_SWDPIO(0);
2357 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2358
2359 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2360
2361 #define ADD(s, m, d) \
2362 do { \
2363 printf("%s%s", sep, s); \
2364 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2365 sep = ", "; \
2366 } while (/*CONSTCOND*/0)
2367
2368 printf("%s: ", sc->sc_dev.dv_xname);
2369 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2370 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2371 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2372 printf("\n");
2373
2374 #undef ADD
2375
2376 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2377 }
2378
2379 /*
2380 * wm_tbi_mediastatus: [ifmedia interface function]
2381 *
2382 * Get the current interface media status on a 1000BASE-X device.
2383 */
2384 void
2385 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2386 {
2387 struct wm_softc *sc = ifp->if_softc;
2388
2389 ifmr->ifm_status = IFM_AVALID;
2390 ifmr->ifm_active = IFM_ETHER;
2391
2392 if (sc->sc_tbi_linkup == 0) {
2393 ifmr->ifm_active |= IFM_NONE;
2394 return;
2395 }
2396
2397 ifmr->ifm_status |= IFM_ACTIVE;
2398 ifmr->ifm_active |= IFM_1000_SX;
2399 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2400 ifmr->ifm_active |= IFM_FDX;
2401 }
2402
2403 /*
2404 * wm_tbi_mediachange: [ifmedia interface function]
2405 *
2406 * Set hardware to newly-selected media on a 1000BASE-X device.
2407 */
2408 int
2409 wm_tbi_mediachange(struct ifnet *ifp)
2410 {
2411 struct wm_softc *sc = ifp->if_softc;
2412 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2413 uint32_t status;
2414 int i;
2415
2416 sc->sc_txcw = ife->ifm_data;
2417 if (sc->sc_ctrl & CTRL_RFCE)
2418 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2419 if (sc->sc_ctrl & CTRL_TFCE)
2420 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2421 sc->sc_txcw |= TXCW_ANE;
2422
2423 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2424 delay(10000);
2425
2426 sc->sc_tbi_anstate = 0;
2427
2428 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2429 		/* Have signal; wait up to 500ms for the link to come up. */
2430 for (i = 0; i < 50; i++) {
2431 delay(10000);
2432 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2433 break;
2434 }
2435
2436 status = CSR_READ(sc, WMREG_STATUS);
2437 if (status & STATUS_LU) {
2438 /* Link is up. */
2439 DPRINTF(WM_DEBUG_LINK,
2440 ("%s: LINK: set media -> link up %s\n",
2441 sc->sc_dev.dv_xname,
2442 (status & STATUS_FD) ? "FDX" : "HDX"));
2443 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2444 if (status & STATUS_FD)
2445 sc->sc_tctl |=
2446 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2447 else
2448 sc->sc_tctl |=
2449 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2450 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2451 sc->sc_tbi_linkup = 1;
2452 } else {
2453 /* Link is down. */
2454 DPRINTF(WM_DEBUG_LINK,
2455 ("%s: LINK: set media -> link down\n",
2456 sc->sc_dev.dv_xname));
2457 sc->sc_tbi_linkup = 0;
2458 }
2459 } else {
2460 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2461 sc->sc_dev.dv_xname));
2462 sc->sc_tbi_linkup = 0;
2463 }
2464
2465 wm_tbi_set_linkled(sc);
2466
2467 return (0);
2468 }
2469
2470 /*
2471 * wm_tbi_set_linkled:
2472 *
2473 * Update the link LED on 1000BASE-X devices.
2474 */
2475 void
2476 wm_tbi_set_linkled(struct wm_softc *sc)
2477 {
2478
2479 if (sc->sc_tbi_linkup)
2480 sc->sc_ctrl |= CTRL_SWDPIN(0);
2481 else
2482 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2483
2484 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2485 }
2486
2487 /*
2488 * wm_tbi_check_link:
2489 *
2490 * Check the link on 1000BASE-X devices.
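 *
 * sc_tbi_anstate acts as a countdown: zero means no link check is
 * pending, values greater than one are decremented once per call
 * while autonegotiation is presumed still in progress, and the
 * actual check runs when the count reaches one.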
2491 */
2492 void
2493 wm_tbi_check_link(struct wm_softc *sc)
2494 {
2495 uint32_t rxcw, ctrl, status;
2496
2497 if (sc->sc_tbi_anstate == 0)
2498 return;
2499 else if (sc->sc_tbi_anstate > 1) {
2500 DPRINTF(WM_DEBUG_LINK,
2501 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2502 sc->sc_tbi_anstate));
2503 sc->sc_tbi_anstate--;
2504 return;
2505 }
2506
2507 sc->sc_tbi_anstate = 0;
2508
2509 rxcw = CSR_READ(sc, WMREG_RXCW);
2510 ctrl = CSR_READ(sc, WMREG_CTRL);
2511 status = CSR_READ(sc, WMREG_STATUS);
2512
2513 if ((status & STATUS_LU) == 0) {
2514 DPRINTF(WM_DEBUG_LINK,
2515 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2516 sc->sc_tbi_linkup = 0;
2517 } else {
2518 DPRINTF(WM_DEBUG_LINK,
2519 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2520 (status & STATUS_FD) ? "FDX" : "HDX"));
2521 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2522 if (status & STATUS_FD)
2523 sc->sc_tctl |=
2524 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2525 else
2526 sc->sc_tctl |=
2527 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2528 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2529 sc->sc_tbi_linkup = 1;
2530 }
2531
2532 wm_tbi_set_linkled(sc);
2533 }
2534
2535 /*
2536 * wm_gmii_reset:
2537 *
2538 * Reset the PHY.
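 *
 * On the Cordova the PHY reset is a bit in the CTRL register; on
 * earlier parts the reset line is wired to software-definable pin
 * 4 and, being active-low, is pulsed low and driven back high
 * here.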
2539 */
2540 void
2541 wm_gmii_reset(struct wm_softc *sc)
2542 {
2543 uint32_t reg;
2544
2545 if (sc->sc_type >= WM_T_CORDOVA) {
2546 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2547 delay(20000);
2548
2549 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2550 delay(20000);
2551 } else {
2552 /* The PHY reset pin is active-low. */
2553 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2554 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2555 CTRL_EXT_SWDPIN(4));
2556 reg |= CTRL_EXT_SWDPIO(4);
2557
2558 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2559 delay(10);
2560
2561 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2562 delay(10);
2563
2564 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2565 delay(10);
2566 #if 0
2567 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2568 #endif
2569 }
2570 }
2571
2572 /*
2573 * wm_gmii_mediainit:
2574 *
2575 * Initialize media for use on 1000BASE-T devices.
2576 */
2577 void
2578 wm_gmii_mediainit(struct wm_softc *sc)
2579 {
2580 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2581
2582 /* We have MII. */
2583 sc->sc_flags |= WM_F_HAS_MII;
2584
2585 sc->sc_tipg = TIPG_1000T_DFLT;
2586
2587 /*
2588 * Let the chip set speed/duplex on its own based on
2589 * signals from the PHY.
2590 */
2591 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2592 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2593
2594 /* Initialize our media structures and probe the GMII. */
2595 sc->sc_mii.mii_ifp = ifp;
2596
2597 if (sc->sc_type >= WM_T_CORDOVA) {
2598 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2599 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2600 } else {
2601 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2602 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2603 }
2604 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2605
2606 wm_gmii_reset(sc);
2607
2608 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2609 wm_gmii_mediastatus);
2610
2611 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2612 MII_OFFSET_ANY, 0);
2613 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2614 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2615 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2616 } else
2617 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2618 }
2619
2620 /*
2621 * wm_gmii_mediastatus: [ifmedia interface function]
2622 *
2623 * Get the current interface media status on a 1000BASE-T device.
2624 */
2625 void
2626 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2627 {
2628 struct wm_softc *sc = ifp->if_softc;
2629
2630 mii_pollstat(&sc->sc_mii);
2631 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2632 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2633 }
2634
2635 /*
2636 * wm_gmii_mediachange: [ifmedia interface function]
2637 *
2638 * Set hardware to newly-selected media on a 1000BASE-T device.
2639 */
2640 int
2641 wm_gmii_mediachange(struct ifnet *ifp)
2642 {
2643 struct wm_softc *sc = ifp->if_softc;
2644
2645 if (ifp->if_flags & IFF_UP)
2646 mii_mediachg(&sc->sc_mii);
2647 return (0);
2648 }
2649
2650 #define MDI_IO CTRL_SWDPIN(2)
2651 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2652 #define MDI_CLK CTRL_SWDPIN(3)
2653
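/*
 * The Livengood has no MDIC register, so MII management frames are
 * bit-banged through the software-definable pins defined above:
 * SWDPIN 2 carries the MDIO data and SWDPIN 3 the MDC clock, with
 * ~10us delays between transitions.  The framing is standard IEEE
 * 802.3 clause 22: a 32-bit preamble of ones, start and opcode
 * bits, the PHY and register addresses, a turnaround, and 16 data
 * bits.
 */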
2654 static void
2655 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2656 {
2657 uint32_t i, v;
2658
2659 v = CSR_READ(sc, WMREG_CTRL);
2660 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2661 v |= MDI_DIR | CTRL_SWDPIO(3);
2662
2663 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2664 if (data & i)
2665 v |= MDI_IO;
2666 else
2667 v &= ~MDI_IO;
2668 CSR_WRITE(sc, WMREG_CTRL, v);
2669 delay(10);
2670 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2671 delay(10);
2672 CSR_WRITE(sc, WMREG_CTRL, v);
2673 delay(10);
2674 }
2675 }
2676
2677 static uint32_t
2678 livengood_mii_recvbits(struct wm_softc *sc)
2679 {
2680 uint32_t v, i, data = 0;
2681
2682 v = CSR_READ(sc, WMREG_CTRL);
2683 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2684 v |= CTRL_SWDPIO(3);
2685
2686 CSR_WRITE(sc, WMREG_CTRL, v);
2687 delay(10);
2688 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2689 delay(10);
2690 CSR_WRITE(sc, WMREG_CTRL, v);
2691 delay(10);
2692
2693 for (i = 0; i < 16; i++) {
2694 data <<= 1;
2695 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2696 delay(10);
2697 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2698 data |= 1;
2699 CSR_WRITE(sc, WMREG_CTRL, v);
2700 delay(10);
2701 }
2702
2703 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2704 delay(10);
2705 CSR_WRITE(sc, WMREG_CTRL, v);
2706 delay(10);
2707
2708 return (data);
2709 }
2710
2711 #undef MDI_IO
2712 #undef MDI_DIR
2713 #undef MDI_CLK
2714
2715 /*
2716 * wm_gmii_livengood_readreg: [mii interface function]
2717 *
2718 * Read a PHY register on the GMII (Livengood version).
2719 */
2720 int
2721 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2722 {
2723 struct wm_softc *sc = (void *) self;
2724 int rv;
2725
2726 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2727 livengood_mii_sendbits(sc, reg | (phy << 5) |
2728 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2729 rv = livengood_mii_recvbits(sc) & 0xffff;
2730
2731 DPRINTF(WM_DEBUG_GMII,
2732 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2733 sc->sc_dev.dv_xname, phy, reg, rv));
2734
2735 return (rv);
2736 }
2737
2738 /*
2739 * wm_gmii_livengood_writereg: [mii interface function]
2740 *
2741 * Write a PHY register on the GMII (Livengood version).
2742 */
2743 void
2744 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2745 {
2746 struct wm_softc *sc = (void *) self;
2747
2748 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2749 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2750 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2751 (MII_COMMAND_START << 30), 32);
2752 }
2753
2754 /*
2755 * wm_gmii_cordova_readreg: [mii interface function]
2756 *
2757 * Read a PHY register on the GMII.
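 *
 * The Cordova runs the MII management cycle in hardware: the
 * opcode and PHY/register addresses are written to MDIC, then
 * MDIC_READY is polled (up to ~1ms here) and the result is taken
 * from the data field of the same register.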
2758 */
2759 int
2760 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2761 {
2762 struct wm_softc *sc = (void *) self;
2763 uint32_t mdic;
2764 int i, rv;
2765
2766 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2767 MDIC_REGADD(reg));
2768
2769 for (i = 0; i < 100; i++) {
2770 mdic = CSR_READ(sc, WMREG_MDIC);
2771 if (mdic & MDIC_READY)
2772 break;
2773 delay(10);
2774 }
2775
2776 if ((mdic & MDIC_READY) == 0) {
2777 printf("%s: MDIC read timed out: phy %d reg %d\n",
2778 sc->sc_dev.dv_xname, phy, reg);
2779 rv = 0;
2780 } else if (mdic & MDIC_E) {
2781 #if 0 /* This is normal if no PHY is present. */
2782 printf("%s: MDIC read error: phy %d reg %d\n",
2783 sc->sc_dev.dv_xname, phy, reg);
2784 #endif
2785 rv = 0;
2786 } else {
2787 rv = MDIC_DATA(mdic);
2788 if (rv == 0xffff)
2789 rv = 0;
2790 }
2791
2792 return (rv);
2793 }
2794
2795 /*
2796 * wm_gmii_cordova_writereg: [mii interface function]
2797 *
2798 * Write a PHY register on the GMII.
2799 */
2800 void
2801 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2802 {
2803 struct wm_softc *sc = (void *) self;
2804 uint32_t mdic;
2805 int i;
2806
2807 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2808 MDIC_REGADD(reg) | MDIC_DATA(val));
2809
2810 for (i = 0; i < 100; i++) {
2811 mdic = CSR_READ(sc, WMREG_MDIC);
2812 if (mdic & MDIC_READY)
2813 break;
2814 delay(10);
2815 }
2816
2817 if ((mdic & MDIC_READY) == 0)
2818 printf("%s: MDIC write timed out: phy %d reg %d\n",
2819 sc->sc_dev.dv_xname, phy, reg);
2820 else if (mdic & MDIC_E)
2821 printf("%s: MDIC write error: phy %d reg %d\n",
2822 sc->sc_dev.dv_xname, phy, reg);
2823 }
2824
2825 /*
2826 * wm_gmii_statchg: [mii interface function]
2827 *
2828 * Callback from MII layer when media changes.
2829 */
2830 void
2831 wm_gmii_statchg(struct device *self)
2832 {
2833 struct wm_softc *sc = (void *) self;
2834
2835 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2836
2837 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2838 DPRINTF(WM_DEBUG_LINK,
2839 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2840 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2841 } else {
2842 DPRINTF(WM_DEBUG_LINK,
2843 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2844 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2845 }
2846
2847 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2848 }
2849