/*	$NetBSD: if_wm.c,v 1.3 2002/05/02 16:34:47 thorpej Exp $	*/

/*
 * Copyright (c) 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel i82542 (``Wiseman''), i82543 (``Livengood''),
 * and i82544 (``Cordova'') Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Fix hw VLAN assist.
 *
 *	- Make GMII work on the Livengood.
 *
 *	- Fix out-bound IP header checksums.
 *
 *	- Fix UDP checksums.
 *
 *	- Jumbo frames -- requires changes to the network stack due to
 *	  lame buffer length handling on chip.
 *
 *	...and, of course, performance tuning.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK	0x01
#define	WM_DEBUG_TX	0x02
#define	WM_DEBUG_RX	0x04
#define	WM_DEBUG_GMII	0x08
int	wm_debug = WM_DEBUG_TX|WM_DEBUG_RX|WM_DEBUG_LINK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring.  We tell the upper layers
 * that they can queue a lot of packets, and we go ahead and manage
 * up to 32 of them at a time.  We allow up to 16 DMA segments per
 * packet.
 */
#define	WM_NTXSEGS		16
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN		32
#define	WM_TXQUEUELEN_MASK	(WM_TXQUEUELEN - 1)
#define	WM_NTXDESC		256
#define	WM_NTXDESC_MASK		(WM_NTXDESC - 1)
#define	WM_NEXTTX(x)		(((x) + 1) & WM_NTXDESC_MASK)
#define	WM_NEXTTXS(x)		(((x) + 1) & WM_TXQUEUELEN_MASK)
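
/*
 * Note that WM_NEXTTX() and WM_NEXTTXS() rely on the corresponding ring
 * sizes being powers of two: "(x + 1) & MASK" wraps the index without a
 * division (e.g. WM_NEXTTX(255) == 0).  The Rx index macros below use
 * the same trick.
 */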

/*
 * The interrupt mitigation feature of the Wiseman is pretty cool -- as
 * long as you're transmitting, you don't have to take an interrupt at
 * all.  However, we force an interrupt to happen every N + 1 packets
 * in order to kick us in a reasonable amount of time when we run out
 * of descriptors.
 */
#define	WM_TXINTR_MASK	7
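
/*
 * The mask is applied to the Tx job index in wm_start(): a packet's last
 * descriptor gets the interrupt-delay (IDE) bit unless its job index is
 * a multiple of (WM_TXINTR_MASK + 1), so an undelayed Tx interrupt is
 * requested at least every 8 packets.
 */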

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 128 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 25 jumbo packets.
 */
#define	WM_NRXDESC		128
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data {
	/*
	 * The transmit descriptors.
	 */
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wcd_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
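
/*
 * The offsetof()-based macros above yield the byte offset of a given
 * descriptor within the control data block; WM_CDTXADDR() and
 * WM_CDRXADDR() below add that offset to the block's DMA address to
 * form the bus address handed to the chip.
 */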

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct wm_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	int sc_type;			/* chip type; see below */
	int sc_flags;			/* flags; see below */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct wm_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->wcd_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anstate;		/* autonegotiation state */

	int sc_mchash_type;		/* multicast filter offset */
};

#define	WM_RXCHAIN_RESET(sc) \
do { \
	(sc)->sc_rxtailp = &(sc)->sc_rxhead; \
	*(sc)->sc_rxtailp = NULL; \
	(sc)->sc_rxlen = 0; \
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m) \
do { \
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \
	(sc)->sc_rxtailp = &(m)->m_next; \
} while (/*CONSTCOND*/0)
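
/*
 * Note that sc_rxtailp always points at the m_next field of the last
 * mbuf in the Rx chain (or at sc_rxhead when the chain is empty), so
 * linking another buffer onto a multi-buffer packet is constant-time.
 */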

/* sc_type */
#define	WM_T_WISEMAN_2_0	0	/* Wiseman (i82542) 2.0 (really old) */
#define	WM_T_WISEMAN_2_1	1	/* Wiseman (i82542) 2.1+ (old) */
#define	WM_T_LIVENGOOD		2	/* Livengood (i82543) */
#define	WM_T_CORDOVA		3	/* Cordova (i82544) */

/* sc_flags */
#define	WM_F_HAS_MII		0x01	/* has MII */

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#endif

#define	CSR_READ(sc, reg) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXSYNC(sc, x, n, ops) \
do { \
	int __x, __n; \
 \
	__x = (x); \
	__n = (n); \
 \
	/* If it will wrap around, sync to the end of the ring. */ \
	if ((__x + __n) > WM_NTXDESC) { \
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * \
		    (WM_NTXDESC - __x), (ops)); \
		__n -= (WM_NTXDESC - __x); \
		__x = 0; \
	} \
 \
	/* Now sync whatever is left. */ \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops)); \
} while (/*CONSTCOND*/0)
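
/*
 * The two-step sync above is needed because bus_dmamap_sync() operates
 * on a single contiguous range: a sync that would run past the end of
 * the descriptor ring must be split into a sync to the end of the ring
 * and a second sync starting at descriptor 0.
 */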

#define	WM_CDRXSYNC(sc, x, ops) \
do { \
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \
	    WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops)); \
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x) \
do { \
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)]; \
	struct mbuf *__m = __rxs->rxs_mbuf; \
 \
	/* \
	 * Note: We scoot the packet forward 2 bytes in the buffer \
	 * so that the payload after the Ethernet header is aligned \
	 * to a 4-byte boundary. \
	 * \
	 * XXX BRAINDAMAGE ALERT! \
	 * The stupid chip uses the same size for every buffer, which \
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this \
	 * reason, we can't accept packets longer than the standard \
	 * Ethernet MTU, without incurring a big penalty to copy every \
	 * incoming packet to a new, suitably aligned buffer. \
	 * \
	 * We'll need to make some changes to the layer 3/4 parts of \
	 * the stack (to copy the headers to a new buffer if not \
	 * aligned) in order to support large MTU on this chip.  Lame. \
	 */ \
	__m->m_data = __m->m_ext.ext_buf + 2; \
 \
	__rxd->wrx_addr.wa_low = \
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr + 2); \
	__rxd->wrx_addr.wa_high = 0; \
	__rxd->wrx_len = 0; \
	__rxd->wrx_cksum = 0; \
	__rxd->wrx_status = 0; \
	__rxd->wrx_errors = 0; \
	__rxd->wrx_special = 0; \
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
 \
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x)); \
} while (/*CONSTCOND*/0)
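
/*
 * Note that it is the final CSR_WRITE() of the index to the RDT
 * (receive descriptor tail) register that returns the freshly
 * initialized descriptor to the chip.
 */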

void	wm_start(struct ifnet *);
void	wm_watchdog(struct ifnet *);
int	wm_ioctl(struct ifnet *, u_long, caddr_t);
int	wm_init(struct ifnet *);
void	wm_stop(struct ifnet *, int);

void	wm_shutdown(void *);

void	wm_reset(struct wm_softc *);
void	wm_rxdrain(struct wm_softc *);
int	wm_add_rxbuf(struct wm_softc *, int);
void	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
void	wm_tick(void *);

void	wm_set_filter(struct wm_softc *);

int	wm_intr(void *);
void	wm_txintr(struct wm_softc *);
void	wm_rxintr(struct wm_softc *);
void	wm_linkintr(struct wm_softc *, uint32_t);

void	wm_tbi_mediainit(struct wm_softc *);
int	wm_tbi_mediachange(struct ifnet *);
void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

void	wm_tbi_set_linkled(struct wm_softc *);
void	wm_tbi_check_link(struct wm_softc *);

void	wm_gmii_reset(struct wm_softc *);

int	wm_gmii_livengood_readreg(struct device *, int, int);
void	wm_gmii_livengood_writereg(struct device *, int, int, int);

int	wm_gmii_cordova_readreg(struct device *, int, int);
void	wm_gmii_cordova_writereg(struct device *, int, int, int);

void	wm_gmii_statchg(struct device *);

void	wm_gmii_mediainit(struct wm_softc *);
int	wm_gmii_mediachange(struct ifnet *);
void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

int	wm_match(struct device *, struct cfdata *, void *);
void	wm_attach(struct device *, struct device *, void *);

int	wm_copy_small = 0;

struct cfattach wm_ca = {
	sizeof(struct wm_softc), wm_match, wm_attach,
};

/*
 * Devices supported by this driver.
 */
const struct wm_product {
	pci_vendor_id_t wmp_vendor;
	pci_product_id_t wmp_product;
	const char *wmp_name;
	int wmp_type;
	int wmp_flags;
#define	WMP_F_1000X	0x01
#define	WMP_F_1000T	0x02
} wm_products[] = {
	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_WISEMAN_2_1, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_FIBER,
	  "Intel i82543 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_SC,
	  "Intel i82543-SC 1000BASE-X Ethernet",
	  WM_T_LIVENGOOD, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82543_COPPER,
	  "Intel i82543 1000BASE-T Ethernet",
	  WM_T_LIVENGOOD, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544_XT,
	  "Intel i82544 1000BASE-T Ethernet",
	  WM_T_CORDOVA, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544_XF,
	  "Intel i82544 1000BASE-X Ethernet",
	  WM_T_CORDOVA, WMP_F_1000X },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA, WMP_F_1000T },

	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82544GC_64,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_CORDOVA, WMP_F_1000T },

	{ 0, 0,
	  NULL,
	  0, 0 },
};

#ifdef WM_EVENT_COUNTERS
#if WM_NTXSEGS != 16
#error Update wm_txseg_evcnt_names
#endif
static const char *wm_txseg_evcnt_names[WM_NTXSEGS] = {
	"txseg1",
	"txseg2",
	"txseg3",
	"txseg4",
	"txseg5",
	"txseg6",
	"txseg7",
	"txseg8",
	"txseg9",
	"txseg10",
	"txseg11",
	"txseg12",
	"txseg13",
	"txseg14",
	"txseg15",
	"txseg16",
};
#endif /* WM_EVENT_COUNTERS */

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return (wmp);
	}
	return (NULL);
}

int
wm_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return (1);

	return (0);
}

void
wm_attach(struct device *parent, struct device *self, void *aux)
{
	struct wm_softc *sc = (void *) self;
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_dma_segment_t seg;
	int memh_valid;
	int i, rseg, error;
	const struct wm_product *wmp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2], cfg1, cfg2, swdpin;
	pcireg_t preg, memtype;
	int pmreg;

	callout_init(&sc->sc_tick_ch);

	wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_dmat = pa->pa_dmat;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	printf(": %s, rev. %d\n", wmp->wmp_name, preg);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_LIVENGOOD) {
		if (preg < 2) {
			printf("%s: Wiseman must be at least rev. 2\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg < 3)
			sc->sc_type = WM_T_WISEMAN_2_0;
	}

	/*
	 * Map the device.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, NULL) == 0);
		break;
	default:
		memh_valid = 0;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else {
		printf("%s: unable to map device registers\n",
		    sc->sc_dev.dv_xname);
		return;
	}

	/* Enable bus mastering.  Disable MWI on the Wiseman 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_WISEMAN_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* Get it out of power save mode, if needed. */
	if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) {
		preg = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3;
		if (preg == 3) {
			/*
			 * The card has lost all configuration data in
			 * this state, so punt.
			 */
			printf("%s: unable to wake from power state D3\n",
			    sc->sc_dev.dv_xname);
			return;
		}
		if (preg != 0) {
			printf("%s: waking up from power state D%d\n",
			    sc->sc_dev.dv_xname, preg);
			pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0);
		}
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("%s: unable to establish interrupt",
		    sc->sc_dev.dv_xname);
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct wm_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct wm_control_data), (caddr_t *)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct wm_control_data), 1,
	    sizeof(struct wm_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", sc->sc_dev.dv_xname, error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct wm_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    sc->sc_dev.dv_xname, error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_MAX_LEN_JUMBO,
		    WM_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create Tx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create Rx DMA map %d, "
			    "error = %d\n", sc->sc_dev.dv_xname, i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	wm_reset(sc);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_MACADDR,
	    sizeof(myea) / sizeof(myea[0]), myea);
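
	/*
	 * Each 16-bit EEPROM word holds two bytes of the Ethernet
	 * address, low-order byte first.
	 */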
	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));

	/*
	 * Read the config info from the EEPROM, and set up various
	 * bits in the control registers based on their contents.
	 */
	wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1);
	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2);
	if (sc->sc_type >= WM_T_CORDOVA)
		wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin);

	if (cfg1 & EEPROM_CFG1_ILOS)
		sc->sc_ctrl |= CTRL_ILOS;
	if (sc->sc_type >= WM_T_CORDOVA) {
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
		sc->sc_ctrl |=
		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
		    CTRL_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl |=
		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_SWDPIO_SHIFT;
	}

#if 0
	if (sc->sc_type >= WM_T_CORDOVA) {
		if (cfg1 & EEPROM_CFG1_IPS0)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
		if (cfg1 & EEPROM_CFG1_IPS1)
			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPIO_SHIFT;
		sc->sc_ctrl_ext |=
		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
		    CTRL_EXT_SWDPINS_SHIFT;
	} else {
		sc->sc_ctrl_ext |=
		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
		    CTRL_EXT_SWDPIO_SHIFT;
	}
#endif

	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up some register offsets that are different between
	 * the Wiseman and the Livengood and later chips.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		sc->sc_rdt_reg = WMREG_OLD_RDT0;
		sc->sc_tdt_reg = WMREG_OLD_TDT;
	} else {
		sc->sc_rdt_reg = WMREG_RDT;
		sc->sc_tdt_reg = WMREG_TDT;
	}

	/*
	 * Determine if we should use flow control.  We should
	 * always use it, unless we're on a Wiseman < 2.1.
	 */
	if (sc->sc_type >= WM_T_WISEMAN_2_1)
		sc->sc_ctrl |= CTRL_TFCE | CTRL_RFCE;

	/*
	 * Determine if we're TBI or GMII mode, and initialize the
	 * media structures accordingly.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD ||
	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
		if (wmp->wmp_flags & WMP_F_1000T)
			printf("%s: WARNING: TBIMODE set on 1000BASE-T "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_tbi_mediainit(sc);
	} else {
		if (wmp->wmp_flags & WMP_F_1000X)
			printf("%s: WARNING: TBIMODE clear on 1000BASE-X "
			    "product!\n", sc->sc_dev.dv_xname);
		wm_gmii_mediainit(sc);
	}

	ifp = &sc->sc_ethercom.ec_if;
	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = wm_ioctl;
	ifp->if_start = wm_start;
	ifp->if_watchdog = wm_watchdog;
	ifp->if_init = wm_init;
	ifp->if_stop = wm_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, WM_IFQUEUELEN);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * If we're a Livengood or greater, we can support VLANs.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		sc->sc_ethercom.ec_capabilities |=
		    ETHERCAP_VLAN_MTU /* XXXJRT | ETHERCAP_VLAN_HWTAGGING */;

	/*
	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
	 * on Livengood and later.
	 */
	if (sc->sc_type >= WM_T_LIVENGOOD)
		ifp->if_capabilities |=
		    IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#ifdef WM_EVENT_COUNTERS
	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "txintr");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, sc->sc_dev.dv_xname, "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txtusum");

	for (i = 0; i < WM_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, sc->sc_dev.dv_xname, wm_txseg_evcnt_names[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "txdrop");

	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
	    NULL, sc->sc_dev.dv_xname, "tu");
#endif /* WM_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(wm_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    sc->sc_dev.dv_xname);
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < WM_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < WM_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data,
	    sizeof(struct wm_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * wm_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
void
wm_shutdown(void *arg)
{
	struct wm_softc *sc = arg;

	wm_stop(&sc->sc_ethercom.ec_if, 1);
}

/*
 * wm_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
wm_tx_cksum(struct wm_softc *sc, struct mbuf *m0, uint32_t *cmdp,
    uint32_t *fieldsp)
{
	struct livengood_tcpip_ctxdesc *t;
	uint32_t fields = 0, tcmd = 0, ipcs, tucs;
	struct ip *ip;
	int offset, iphl;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	/* XXX Assumes normal Ethernet encap. */
	offset = ETHER_HDR_LEN;

	/* XXX */
	if (m0->m_len < (offset + sizeof(struct ip))) {
		printf("%s: wm_tx_cksum: need to m_pullup, "
		    "packet dropped\n", sc->sc_dev.dv_xname);
		return (EINVAL);
	}

	ip = (struct ip *) (mtod(m0, caddr_t) + offset);
	iphl = ip->ip_hl << 2;

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
		tcmd |= htole32(WTX_TCPIP_CMD_IP);
		fields |= htole32(WTX_IXSM);
		ipcs = htole32(WTX_TCPIP_IPCSS(offset) |
		    WTX_TCPIP_IPCSO(offsetof(struct ip, ip_sum)) |
		    WTX_TCPIP_IPCSE(offset + iphl - 1));
	} else
		ipcs = 0;

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
		tcmd |= htole32(WTX_TCPIP_CMD_TCP);
		fields |= htole32(WTX_TXSM);
		tucs = htole32(WTX_TCPIP_TUCSS(offset) |
		    WTX_TCPIP_TUCSO(offset + m0->m_pkthdr.csum_data) |
		    WTX_TCPIP_TUCSE(0) /* rest of packet */);
	} else
		tucs = 0;

	/* Fill in the context descriptor. */
	t = (struct livengood_tcpip_ctxdesc *) &sc->sc_txdescs[sc->sc_txnext];
	t->tcpip_ipcs = ipcs;
	t->tcpip_tucs = tucs;
	t->tcpip_cmdlen =
	    htole32(WTX_CMD_DEXT | WTX_CMD_IDE | WTX_DTYP_C) | tcmd;
	t->tcpip_seg = 0;
	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

	sc->sc_txnext = WM_NEXTTX(sc->sc_txnext);
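
	/*
	 * Note that the context descriptor consumes an ordinary slot in
	 * the Tx ring; this is why wm_start() reserves one descriptor
	 * beyond the packet's DMA segments.
	 */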

	*cmdp = WTX_CMD_DEXT | WTC_DTYP_D;
	*fieldsp = fields;

	return (0);
}

/*
 * wm_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
void
wm_start(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct mbuf *m0/*, *m*/;
	struct wm_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, ofree, seg;
	uint32_t cksumcmd, cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    sc->sc_dev.dv_xname, m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree == 0) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: no free job descriptors\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    sc->sc_dev.dv_xname, error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load the checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    sc->sc_dev.dv_xname, dmamap->dm_nsegs,
			    sc->sc_txfree - 2));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    sc->sc_dev.dv_xname, dmamap->dm_nsegs));

		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (wm_tx_cksum(sc, m0, &cksumcmd, &cksumfields) != 0) {
				/* Error message already displayed. */
				m_freem(m0);
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumcmd = 0;
			cksumfields = 0;
		}

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = WM_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].wtx_addr.wa_low =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].wtx_cmdlen = cksumcmd |
			    htole32(dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].wtx_fields.wtxu_bits =
			    cksumfields;
			lasttx = nexttx;

			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08x, len 0x%04x\n",
			    sc->sc_dev.dv_xname, nexttx,
			    (uint32_t) dmamap->dm_segs[seg].ds_addr,
			    (uint32_t) dmamap->dm_segs[seg].ds_len));
		}

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].wtx_cmdlen |=
		    htole32(WTX_CMD_EOP | WTX_CMD_IFCS | WTX_CMD_RS);
		if (sc->sc_txsnext & WM_TXINTR_MASK)
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_IDE);

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled and the packet has a VLAN tag, set
		 * up the descriptor to encapsulate the packet for us.
		 *
		 * This is only valid on the last descriptor of the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) {
			sc->sc_txdescs[lasttx].wtx_cmdlen |=
			    htole32(WTX_CMD_VLE);
			sc->sc_txdescs[lasttx].wtx_fields.wtxu_fields.wtxu_vlan
			    = htole16(*mtod(m, int *) & 0xffff);
		}
#endif /* XXXJRT */

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", sc->sc_dev.dv_xname,
		    lasttx, sc->sc_txdescs[lasttx].wtx_cmdlen));

		/* Sync the descriptors we're using. */
		WM_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", sc->sc_dev.dv_xname, nexttx));

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    sc->sc_dev.dv_xname, sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = WM_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree == 0) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * wm_watchdog:		[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
wm_watchdog(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	wm_txintr(sc);

	if (sc->sc_txfree != WM_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    sc->sc_dev.dv_xname, sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) wm_init(ifp);
	}

	/* Try to get more packets going. */
	wm_start(ifp);
}

/*
 * wm_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
wm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct wm_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			wm_set_filter(sc);
			error = 0;
		}
		break;
	}

	/* Try to get more packets going. */
	wm_start(ifp);

	splx(s);
	return (error);
}

/*
 * wm_intr:
 *
 *	Interrupt service routine.
 */
int
wm_intr(void *arg)
{
	struct wm_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, WMREG_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

		handled = 1;

		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    sc->sc_dev.dv_xname,
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
			wm_rxintr(sc);
		}

		if (icr & ICR_TXDW) {
			DPRINTF(WM_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    sc->sc_dev.dv_xname));
			WM_EVCNT_INCR(&sc->sc_ev_txintr);
			wm_txintr(sc);
		}

		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
			wm_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", sc->sc_dev.dv_xname);
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			wm_init(ifp);

		/* Try to get more packets going. */
		wm_start(ifp);
	}

	return (handled);
}

/*
 * wm_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
void
wm_txintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN;
	     i = WM_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: checking job %d\n", sc->sc_dev.dv_xname, i));

		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

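		/*
		 * Only the last descriptor of each job was marked with
		 * WTX_CMD_RS in wm_start(), so the descriptor-done (DD)
		 * bit is checked there; if it is not yet set, the job
		 * is still in flight.
		 */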
		status = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].wtx_fields.wtxu_bits);
		if ((status & WTX_ST_DD) == 0)
			break;

		DPRINTF(WM_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    sc->sc_dev.dv_xname, i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		/*
		 * XXX We should probably be using the statistics
		 * XXX registers, but I don't know if they exist
		 * XXX on chips before the Cordova.
		 */

#ifdef WM_EVENT_COUNTERS
		if (status & WTX_ST_TU)
			WM_EVCNT_INCR(&sc->sc_ev_tu);
#endif /* WM_EVENT_COUNTERS */

		if (status & (WTX_ST_EC|WTX_ST_LC)) {
			ifp->if_oerrors++;
			if (status & WTX_ST_LC)
				printf("%s: late collision\n",
				    sc->sc_dev.dv_xname);
			else if (status & WTX_ST_EC) {
				ifp->if_collisions += 16;
				printf("%s: excessive collisions\n",
				    sc->sc_dev.dv_xname);
			}
		} else
			ifp->if_opackets++;

		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(WM_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", sc->sc_dev.dv_xname, i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == WM_TXQUEUELEN)
		ifp->if_timer = 0;
}

/*
 * wm_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
wm_rxintr(struct wm_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct wm_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    sc->sc_dev.dv_xname, i));

		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].wrx_status;
		errors = sc->sc_rxdescs[i].wrx_errors;
		len = le16toh(sc->sc_rxdescs[i].wrx_len);

		if ((status & WRX_ST_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    sc->sc_dev.dv_xname, i));
			WM_INIT_RXDESC(sc, i);
			if (status & WRX_ST_EOP) {
				/* Reset our state. */
				DPRINTF(WM_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    sc->sc_dev.dv_xname));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (wm_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			WM_INIT_RXDESC(sc, i);
			if ((status & WRX_ST_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			WM_RXCHAIN_RESET(sc);
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", sc->sc_dev.dv_xname,
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}

		WM_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    sc->sc_dev.dv_xname, m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & WRX_ST_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    sc->sc_dev.dv_xname, sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
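		/*
		 * sc_rxlen holds the accumulated length of all of the
		 * previous buffers in this packet; adding the final
		 * buffer's length yields the total packet length.
		 */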
		len += sc->sc_rxlen;

		WM_RXCHAIN_RESET(sc);

		DPRINTF(WM_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    sc->sc_dev.dv_xname, len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
			ifp->if_ierrors++;
			if (errors & WRX_ER_SE)
				printf("%s: symbol error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_SEQ)
				printf("%s: receive sequence error\n",
				    sc->sc_dev.dv_xname);
			else if (errors & WRX_ER_CE)
				printf("%s: CRC error\n",
				    sc->sc_dev.dv_xname);
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 *
		 * Note, we have configured the chip to include the
		 * CRC with every packet.
		 */
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if 0 /* XXXJRT */
		/*
		 * If VLANs are enabled, VLAN packets have been unwrapped
		 * for us.  Associate the tag with the packet.
		 */
		if (sc->sc_ethercom.ec_nvlans != 0 &&
		    (status & WRX_ST_VP) != 0) {
			struct mbuf *vtag;

			vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN);
			if (vtag == NULL) {
				ifp->if_ierrors++;
				printf("%s: unable to allocate VLAN tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				continue;
			}

			*mtod(m, int *) =
			    le16toh(sc->sc_rxdescs[i].wrx_special);
			vtag->m_len = sizeof(int);
		}
#endif /* XXXJRT */

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & WRX_ST_IPCS) {
			WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & WRX_ER_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & WRX_ST_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & WRX_ER_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(WM_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", sc->sc_dev.dv_xname, i));
}

/*
 * wm_linkintr:
 *
 *	Helper; handle link interrupts.
 */
void
wm_linkintr(struct wm_softc *sc, uint32_t icr)
{
	uint32_t status;

	/*
	 * If we get a link status interrupt on a 1000BASE-T
	 * device, just fall into the normal MII tick path.
	 */
	if (sc->sc_flags & WM_F_HAS_MII) {
		if (icr & ICR_LSC) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: LSC -> mii_tick\n",
			    sc->sc_dev.dv_xname));
			mii_tick(&sc->sc_mii);
		} else if (icr & ICR_RXSEQ) {
			DPRINTF(WM_DEBUG_LINK,
			    ("%s: LINK: Receive sequence error\n",
			    sc->sc_dev.dv_xname));
		}
		return;
	}

	/*
	 * If we are now receiving /C/, check for link again in
	 * a couple of link clock ticks.
	 */
	if (icr & ICR_RXCFG) {
		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
		    sc->sc_dev.dv_xname));
		sc->sc_tbi_anstate = 2;
	}

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, WMREG_STATUS);
		if (status & STATUS_LU) {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
			    sc->sc_dev.dv_xname,
			    (status & STATUS_FD) ? "FDX" : "HDX"));
			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
			if (status & STATUS_FD)
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
			else
				sc->sc_tctl |=
				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
			sc->sc_tbi_linkup = 1;
		} else {
			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    sc->sc_dev.dv_xname));
			sc->sc_tbi_linkup = 0;
		}
		sc->sc_tbi_anstate = 2;
		wm_tbi_set_linkled(sc);
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(WM_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    sc->sc_dev.dv_xname));
	}
}

/*
 * wm_tick:
 *
 *	One second timer, used to check link status, sweep up
 *	completed transmit jobs, etc.
 */
void
wm_tick(void *arg)
{
	struct wm_softc *sc = arg;
	int s;

	s = splnet();

	if (sc->sc_flags & WM_F_HAS_MII)
		mii_tick(&sc->sc_mii);
	else
		wm_tbi_check_link(sc);

	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
}

/*
 * wm_reset:
 *
 *	Reset the i82542 chip.
 */
void
wm_reset(struct wm_softc *sc)
{
	int i;

	CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, WMREG_CTRL) & CTRL_RST) == 0)
			return;
		delay(20);
	}

	if (CSR_READ(sc, WMREG_CTRL) & CTRL_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    sc->sc_dev.dv_xname);
}

/*
 * wm_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
int
wm_init(struct ifnet *ifp)
{
	struct wm_softc *sc = ifp->if_softc;
	struct wm_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/* Cancel any pending I/O. */
	wm_stop(ifp, 0);

	/* Reset the chip to a known state. */
	wm_reset(sc);

	/* Initialize the transmit descriptor ring. */
	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	WM_CDTXSYNC(sc, 0, WM_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = WM_NTXDESC;
	sc->sc_txnext = 0;

	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_TBDAH, 0);
		CSR_WRITE(sc, WMREG_OLD_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
		CSR_WRITE(sc, WMREG_OLD_TIDV, 64);
	} else {
		CSR_WRITE(sc, WMREG_TBDAH, 0);
		CSR_WRITE(sc, WMREG_TBDAL, WM_CDTXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_TDLEN, sizeof(sc->sc_txdescs));
		CSR_WRITE(sc, WMREG_TDH, 0);
		CSR_WRITE(sc, WMREG_TDT, 0);
		CSR_WRITE(sc, WMREG_TIDV, 64);

		CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
		    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
		CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
		    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
	}
	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
	CSR_WRITE(sc, WMREG_TQSA_HI, 0);

	/* Initialize the transmit job descriptors. */
	for (i = 0; i < WM_TXQUEUELEN; i++)
		sc->sc_txsoft[i].txs_mbuf = NULL;
	sc->sc_txsfree = WM_TXQUEUELEN;
	sc->sc_txsnext = 0;
	sc->sc_txsdirty = 0;

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	if (sc->sc_type < WM_T_LIVENGOOD) {
		CSR_WRITE(sc, WMREG_OLD_RDBAH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR0, 64 | RDTR_FPD);

		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
	} else {
		CSR_WRITE(sc, WMREG_RDBAH, 0);
		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR(sc, 0));
		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
		CSR_WRITE(sc, WMREG_RDH, 0);
		CSR_WRITE(sc, WMREG_RDT, 0);
		CSR_WRITE(sc, WMREG_RDTR, 64 | RDTR_FPD);
	}
	for (i = 0; i < WM_NRXDESC; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf == NULL) {
			if ((error = wm_add_rxbuf(sc, i)) != 0) {
				printf("%s: unable to allocate or map rx "
				    "buffer %d, error = %d\n",
				    sc->sc_dev.dv_xname, i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				wm_rxdrain(sc);
				goto out;
			}
		} else
			WM_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;
	sc->sc_rxdiscard = 0;
	WM_RXCHAIN_RESET(sc);

	/*
	 * Clear out the VLAN table -- we don't use it (yet).
	 */
	CSR_WRITE(sc, WMREG_VET, 0);
	for (i = 0; i < WM_VLAN_TABSIZE; i++)
		CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);

	/*
	 * Set up flow-control parameters.
	 *
	 * XXX Values could probably stand some tuning.
	 */
	if (sc->sc_ctrl & (CTRL_RFCE|CTRL_TFCE)) {
		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);

		if (sc->sc_type < WM_T_LIVENGOOD) {
			CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_OLD_FCRTL, FCRTL_DFLT);
		} else {
			CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
			CSR_WRITE(sc, WMREG_FCRTL, FCRTL_DFLT);
		}
		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
	}

#if 0 /* XXXJRT */
	/* Deal with VLAN enables. */
	if (sc->sc_ethercom.ec_nvlans != 0)
		sc->sc_ctrl |= CTRL_VME;
	else
#endif /* XXXJRT */
		sc->sc_ctrl &= ~CTRL_VME;

	/* Write the control registers. */
	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
#if 0
	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
#endif

	/*
	 * Set up checksum offload parameters.
	 */
	reg = CSR_READ(sc, WMREG_RXCSUM);
	if (ifp->if_capenable & IFCAP_CSUM_IPv4)
		reg |= RXCSUM_IPOFL;
	else
		reg &= ~RXCSUM_IPOFL;
	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4))
		reg |= RXCSUM_TUOFL;
	else
		reg &= ~RXCSUM_TUOFL;
	CSR_WRITE(sc, WMREG_RXCSUM, reg);

	/*
	 * Set up the interrupt registers.
	 */
	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
	    ICR_RXO | ICR_RXT0;
	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
		sc->sc_icr |= ICR_RXCFG;
	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
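
	/*
	 * Note that sc_icr doubles as the set of interrupt causes
	 * wm_intr() is willing to handle: the IMC write first masks
	 * everything off, and the IMS write then enables exactly the
	 * causes recorded in sc_icr.
	 */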
1934
1935 /* Set up the inter-packet gap. */
1936 CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
1937
1938 #if 0 /* XXXJRT */
1939 /* Set the VLAN ethernetype. */
1940 CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
1941 #endif
1942
1943 /*
1944 * Set up the transmit control register; we start out with
1945 * a collision distance suitable for FDX, but update it whe
1946 * we resolve the media type.
1947 */
1948 sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_CT(TX_COLLISION_THRESHOLD) |
1949 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
1950 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
1951
1952 /* Set the media. */
1953 (void) (*sc->sc_mii.mii_media.ifm_change)(ifp);
1954
1955 /*
1956 * Set up the receive control register; we actually program
1957 * the register when we set the receive filter. Use multicast
1958 * address offset type 0.
1959 *
1960 * Only the Cordova has the ability to strip the incoming
1961 * CRC, so we don't enable that feature.
1962 */
1963 sc->sc_mchash_type = 0;
1964 sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_2k |
1965 RCTL_DPF | RCTL_MO(sc->sc_mchash_type);
1966
1967 /* Set the receive filter. */
1968 wm_set_filter(sc);
1969
1970 /* Start the one second link check clock. */
1971 callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
1972
1973 /* ...all done! */
1974 ifp->if_flags |= IFF_RUNNING;
1975 ifp->if_flags &= ~IFF_OACTIVE;
1976
1977 out:
1978 if (error)
1979 printf("%s: interface not running\n", sc->sc_dev.dv_xname);
1980 return (error);
1981 }
1982
1983 /*
1984 * wm_rxdrain:
1985 *
1986 * Drain the receive queue.
1987 */
1988 void
1989 wm_rxdrain(struct wm_softc *sc)
1990 {
1991 struct wm_rxsoft *rxs;
1992 int i;
1993
1994 for (i = 0; i < WM_NRXDESC; i++) {
1995 rxs = &sc->sc_rxsoft[i];
1996 if (rxs->rxs_mbuf != NULL) {
1997 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
1998 m_freem(rxs->rxs_mbuf);
1999 rxs->rxs_mbuf = NULL;
2000 }
2001 }
2002 }
2003
2004 /*
2005 * wm_stop: [ifnet interface function]
2006 *
2007 * Stop transmission on the interface.
2008 */
2009 void
2010 wm_stop(struct ifnet *ifp, int disable)
2011 {
2012 struct wm_softc *sc = ifp->if_softc;
2013 struct wm_txsoft *txs;
2014 int i;
2015
2016 /* Stop the one second clock. */
2017 callout_stop(&sc->sc_tick_ch);
2018
2019 if (sc->sc_flags & WM_F_HAS_MII) {
2020 /* Down the MII. */
2021 mii_down(&sc->sc_mii);
2022 }
2023
2024 /* Stop the transmit and receive processes. */
2025 CSR_WRITE(sc, WMREG_TCTL, 0);
2026 CSR_WRITE(sc, WMREG_RCTL, 0);
2027
2028 /* Release any queued transmit buffers. */
2029 for (i = 0; i < WM_TXQUEUELEN; i++) {
2030 txs = &sc->sc_txsoft[i];
2031 if (txs->txs_mbuf != NULL) {
2032 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
2033 m_freem(txs->txs_mbuf);
2034 txs->txs_mbuf = NULL;
2035 }
2036 }
2037
2038 if (disable)
2039 wm_rxdrain(sc);
2040
2041 /* Mark the interface as down and cancel the watchdog timer. */
2042 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
2043 ifp->if_timer = 0;
2044 }
2045
2046 /*
2047 * wm_read_eeprom:
2048 *
2049 * Read data from the serial EEPROM.
2050 */
2051 void
2052 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
2053 {
2054 uint32_t reg;
2055 int i, x;
2056
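	/*
	 * Bit-bang a microwire (three-wire) READ cycle for each word:
	 * command and address bits are presented on EECD_DI and latched
	 * by the EEPROM on the rising edge of EECD_SK; result bits are
	 * sampled from EECD_DO while the clock is high.
	 */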
2057 for (i = 0; i < wordcnt; i++) {
2058 /* Send CHIP SELECT for one clock tick. */
2059 CSR_WRITE(sc, WMREG_EECD, EECD_CS);
2060 delay(2);
2061
2062 /* Shift in the READ command. */
2063 for (x = 3; x > 0; x--) {
2064 reg = EECD_CS;
2065 if (UWIRE_OPC_READ & (1 << (x - 1)))
2066 reg |= EECD_DI;
2067 CSR_WRITE(sc, WMREG_EECD, reg);
2068 delay(2);
2069 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2070 delay(2);
2071 CSR_WRITE(sc, WMREG_EECD, reg);
2072 delay(2);
2073 }
2074
2075 /* Shift in address. */
2076 for (x = 6; x > 0; x--) {
2077 reg = EECD_CS;
2078 if ((word + i) & (1 << (x - 1)))
2079 reg |= EECD_DI;
2080 CSR_WRITE(sc, WMREG_EECD, reg);
2081 delay(2);
2082 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2083 delay(2);
2084 CSR_WRITE(sc, WMREG_EECD, reg);
2085 delay(2);
2086 }
2087
2088 /* Shift out the data. */
2089 reg = EECD_CS;
2090 data[i] = 0;
2091 for (x = 16; x > 0; x--) {
2092 CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
2093 delay(2);
2094 if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
2095 data[i] |= (1 << (x - 1));
2096 CSR_WRITE(sc, WMREG_EECD, reg);
2097 delay(2);
2098 }
2099
2100 /* Clear CHIP SELECT. */
2101 CSR_WRITE(sc, WMREG_EECD, 0);
2102 }
2103 }
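
#if 0
/*
 * Usage sketch (not compiled): fetching the station address the way
 * the attach code does.  That the Ethernet address occupies EEPROM
 * words 0x00-0x02, least-significant byte first, is Intel's
 * documented layout, not something wm_read_eeprom() itself knows.
 */
static void
wm_eeprom_enaddr(struct wm_softc *sc, uint8_t enaddr[ETHER_ADDR_LEN])
{
	uint16_t myea[ETHER_ADDR_LEN / 2];
	int i;

	wm_read_eeprom(sc, 0, ETHER_ADDR_LEN / 2, myea);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
		enaddr[i * 2] = myea[i] & 0xff;
		enaddr[i * 2 + 1] = (myea[i] >> 8) & 0xff;
	}
}
#endif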
2104
2105 /*
2106 * wm_add_rxbuf:
2107 *
2108  *	Add a receive buffer to the indicated descriptor.
2109 */
2110 int
2111 wm_add_rxbuf(struct wm_softc *sc, int idx)
2112 {
2113 struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
2114 struct mbuf *m;
2115 int error;
2116
2117 MGETHDR(m, M_DONTWAIT, MT_DATA);
2118 if (m == NULL)
2119 return (ENOBUFS);
2120
2121 MCLGET(m, M_DONTWAIT);
2122 if ((m->m_flags & M_EXT) == 0) {
2123 m_freem(m);
2124 return (ENOBUFS);
2125 }
2126
2127 if (rxs->rxs_mbuf != NULL)
2128 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
2129
2130 rxs->rxs_mbuf = m;
2131
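	/*
	 * Map the entire 2k cluster; this matches the RCTL_2k receive
	 * buffer size programmed into the chip by wm_init().
	 */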
2132 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
2133 m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
2134 BUS_DMA_READ|BUS_DMA_NOWAIT);
2135 if (error) {
2136 printf("%s: unable to load rx DMA map %d, error = %d\n",
2137 sc->sc_dev.dv_xname, idx, error);
2138 panic("wm_add_rxbuf"); /* XXX XXX XXX */
2139 }
2140
2141 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
2142 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
2143
2144 WM_INIT_RXDESC(sc, idx);
2145
2146 return (0);
2147 }
2148
2149 /*
2150 * wm_set_ral:
2151 *
2152  *	Set an entry in the receive address list.
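 *
 *	The hardware takes the address split little-endian across a
 *	register pair: e.g. 00:a0:c9:12:34:56 is written as
 *	ral_lo = 0x12c9a000 and ral_hi = 0x5634 | RAL_AV (the
 *	address-valid bit).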
2153 */
2154 static void
2155 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2156 {
2157 uint32_t ral_lo, ral_hi;
2158
2159 if (enaddr != NULL) {
2160 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2161 (enaddr[3] << 24);
2162 ral_hi = enaddr[4] | (enaddr[5] << 8);
2163 ral_hi |= RAL_AV;
2164 } else {
2165 ral_lo = 0;
2166 ral_hi = 0;
2167 }
2168
2169 if (sc->sc_type >= WM_T_CORDOVA) {
2170 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2171 ral_lo);
2172 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2173 ral_hi);
2174 } else {
2175 CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2176 CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2177 }
2178 }
2179
2180 /*
2181 * wm_mchash:
2182 *
2183 * Compute the hash of the multicast address for the 4096-bit
2184 * multicast filter.
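 *
 *	With the default mchash type 0, the hash is the top four bits
 *	of enaddr[4] combined with all of enaddr[5]: an address ending
 *	in ...:4a:5b yields (0x4a >> 4) | (0x5b << 4) = 0x5b4.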
2185 */
2186 static uint32_t
2187 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2188 {
2189 static const int lo_shift[4] = { 4, 3, 2, 0 };
2190 static const int hi_shift[4] = { 4, 5, 6, 8 };
2191 uint32_t hash;
2192
2193 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
2194 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
2195
2196 return (hash & 0xfff);
2197 }
2198
2199 /*
2200 * wm_set_filter:
2201 *
2202 * Set up the receive filter.
2203 */
2204 void
2205 wm_set_filter(struct wm_softc *sc)
2206 {
2207 struct ethercom *ec = &sc->sc_ethercom;
2208 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2209 struct ether_multi *enm;
2210 struct ether_multistep step;
2211 bus_addr_t mta_reg;
2212 uint32_t hash, reg, bit;
2213 int i;
2214
2215 if (sc->sc_type >= WM_T_CORDOVA)
2216 mta_reg = WMREG_CORDOVA_MTA;
2217 else
2218 mta_reg = WMREG_MTA;
2219
2220 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
2221
2222 if (ifp->if_flags & IFF_BROADCAST)
2223 sc->sc_rctl |= RCTL_BAM;
2224 if (ifp->if_flags & IFF_PROMISC) {
2225 sc->sc_rctl |= RCTL_UPE;
2226 goto allmulti;
2227 }
2228
2229 /*
2230 * Set the station address in the first RAL slot, and
2231 * clear the remaining slots.
2232 */
2233 wm_set_ral(sc, LLADDR(ifp->if_sadl), 0);
2234 for (i = 1; i < WM_RAL_TABSIZE; i++)
2235 wm_set_ral(sc, NULL, i);
2236
2237 /* Clear out the multicast table. */
2238 for (i = 0; i < WM_MC_TABSIZE; i++)
2239 CSR_WRITE(sc, mta_reg + (i << 2), 0);
2240
2241 ETHER_FIRST_MULTI(step, ec, enm);
2242 while (enm != NULL) {
2243 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
2244 /*
2245 * We must listen to a range of multicast addresses.
2246 * For now, just accept all multicasts, rather than
2247 * trying to set only those filter bits needed to match
2248 * the range. (At this time, the only use of address
2249 * ranges is for IP multicast routing, for which the
2250 * range is big enough to require all bits set.)
2251 */
2252 goto allmulti;
2253 }
2254
2255 hash = wm_mchash(sc, enm->enm_addrlo);
2256
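		/*
		 * The MTA is a 4096-bit array stored as 128 32-bit
		 * words; the upper seven bits of the hash select the
		 * word, the low five bits the bit within it.
		 */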
2257 reg = (hash >> 5) & 0x7f;
2258 bit = hash & 0x1f;
2259
2260 hash = CSR_READ(sc, mta_reg + (reg << 2));
2261 hash |= 1U << bit;
2262
2263 		/* XXX Hardware bug??  Odd MTA writes clobber the previous word. */
2264 		if (sc->sc_type == WM_T_CORDOVA && (reg & 1) != 0) {
2265 bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
2266 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2267 CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
2268 } else
2269 CSR_WRITE(sc, mta_reg + (reg << 2), hash);
2270
2271 ETHER_NEXT_MULTI(step, enm);
2272 }
2273
2274 ifp->if_flags &= ~IFF_ALLMULTI;
2275 goto setit;
2276
2277 allmulti:
2278 ifp->if_flags |= IFF_ALLMULTI;
2279 sc->sc_rctl |= RCTL_MPE;
2280
2281 setit:
2282 CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
2283 }
2284
2285 /*
2286 * wm_tbi_mediainit:
2287 *
2288 * Initialize media for use on 1000BASE-X devices.
2289 */
2290 void
2291 wm_tbi_mediainit(struct wm_softc *sc)
2292 {
2293 const char *sep = "";
2294
2295 if (sc->sc_type < WM_T_LIVENGOOD)
2296 sc->sc_tipg = TIPG_WM_DFLT;
2297 else
2298 sc->sc_tipg = TIPG_LG_DFLT;
2299
2300 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_tbi_mediachange,
2301 wm_tbi_mediastatus);
2302
2303 /*
2304 * SWD Pins:
2305 *
2306 * 0 = Link LED (output)
2307 * 1 = Loss Of Signal (input)
2308 */
2309 sc->sc_ctrl |= CTRL_SWDPIO(0);
2310 sc->sc_ctrl &= ~CTRL_SWDPIO(1);
2311
2312 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2313
2314 #define ADD(s, m, d) \
2315 do { \
2316 printf("%s%s", sep, s); \
2317 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(m), (d), NULL); \
2318 sep = ", "; \
2319 } while (/*CONSTCOND*/0)
2320
2321 printf("%s: ", sc->sc_dev.dv_xname);
2322 ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
2323 ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
2324 ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
2325 printf("\n");
2326
2327 #undef ADD
2328
2329 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2330 }
2331
2332 /*
2333 * wm_tbi_mediastatus: [ifmedia interface function]
2334 *
2335 * Get the current interface media status on a 1000BASE-X device.
2336 */
2337 void
2338 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2339 {
2340 struct wm_softc *sc = ifp->if_softc;
2341
2342 ifmr->ifm_status = IFM_AVALID;
2343 ifmr->ifm_active = IFM_ETHER;
2344
2345 if (sc->sc_tbi_linkup == 0) {
2346 ifmr->ifm_active |= IFM_NONE;
2347 return;
2348 }
2349
2350 ifmr->ifm_status |= IFM_ACTIVE;
2351 ifmr->ifm_active |= IFM_1000_SX;
2352 if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
2353 ifmr->ifm_active |= IFM_FDX;
2354 }
2355
2356 /*
2357 * wm_tbi_mediachange: [ifmedia interface function]
2358 *
2359 * Set hardware to newly-selected media on a 1000BASE-X device.
2360 */
2361 int
2362 wm_tbi_mediachange(struct ifnet *ifp)
2363 {
2364 struct wm_softc *sc = ifp->if_softc;
2365 struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
2366 uint32_t status;
2367 int i;
2368
2369 sc->sc_txcw = ife->ifm_data;
2370 if (sc->sc_ctrl & CTRL_RFCE)
2371 sc->sc_txcw |= ANAR_X_PAUSE_TOWARDS;
2372 if (sc->sc_ctrl & CTRL_TFCE)
2373 sc->sc_txcw |= ANAR_X_PAUSE_ASYM;
2374 sc->sc_txcw |= TXCW_ANE;
2375
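	/*
	 * TXCW carries our 802.3z autonegotiation advertisement;
	 * TXCW_ANE enables autonegotiation, and the delay below gives
	 * the link partner time to respond before we poll for link.
	 */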
2376 CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
2377 delay(10000);
2378
2379 sc->sc_tbi_anstate = 0;
2380
2381 if ((CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1)) == 0) {
2382 /* Have signal; wait for the link to come up. */
2383 for (i = 0; i < 50; i++) {
2384 delay(10000);
2385 if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
2386 break;
2387 }
2388
2389 status = CSR_READ(sc, WMREG_STATUS);
2390 if (status & STATUS_LU) {
2391 /* Link is up. */
2392 DPRINTF(WM_DEBUG_LINK,
2393 ("%s: LINK: set media -> link up %s\n",
2394 sc->sc_dev.dv_xname,
2395 (status & STATUS_FD) ? "FDX" : "HDX"));
2396 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2397 if (status & STATUS_FD)
2398 sc->sc_tctl |=
2399 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2400 else
2401 sc->sc_tctl |=
2402 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2403 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2404 sc->sc_tbi_linkup = 1;
2405 } else {
2406 /* Link is down. */
2407 DPRINTF(WM_DEBUG_LINK,
2408 ("%s: LINK: set media -> link down\n",
2409 sc->sc_dev.dv_xname));
2410 sc->sc_tbi_linkup = 0;
2411 }
2412 } else {
2413 DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
2414 sc->sc_dev.dv_xname));
2415 sc->sc_tbi_linkup = 0;
2416 }
2417
2418 wm_tbi_set_linkled(sc);
2419
2420 return (0);
2421 }
2422
2423 /*
2424 * wm_tbi_set_linkled:
2425 *
2426 * Update the link LED on 1000BASE-X devices.
2427 */
2428 void
2429 wm_tbi_set_linkled(struct wm_softc *sc)
2430 {
2431
2432 if (sc->sc_tbi_linkup)
2433 sc->sc_ctrl |= CTRL_SWDPIN(0);
2434 else
2435 sc->sc_ctrl &= ~CTRL_SWDPIN(0);
2436
2437 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2438 }
2439
2440 /*
2441 * wm_tbi_check_link:
2442 *
2443 * Check the link on 1000BASE-X devices.
2444 */
2445 void
2446 wm_tbi_check_link(struct wm_softc *sc)
2447 {
2448 uint32_t rxcw, ctrl, status;
2449
2450 if (sc->sc_tbi_anstate == 0)
2451 return;
2452 else if (sc->sc_tbi_anstate > 1) {
2453 DPRINTF(WM_DEBUG_LINK,
2454 ("%s: LINK: anstate %d\n", sc->sc_dev.dv_xname,
2455 sc->sc_tbi_anstate));
2456 sc->sc_tbi_anstate--;
2457 return;
2458 }
2459
2460 sc->sc_tbi_anstate = 0;
2461
2462 rxcw = CSR_READ(sc, WMREG_RXCW);
2463 ctrl = CSR_READ(sc, WMREG_CTRL);
2464 status = CSR_READ(sc, WMREG_STATUS);
2465
2466 if ((status & STATUS_LU) == 0) {
2467 DPRINTF(WM_DEBUG_LINK,
2468 ("%s: LINK: checklink -> down\n", sc->sc_dev.dv_xname));
2469 sc->sc_tbi_linkup = 0;
2470 } else {
2471 DPRINTF(WM_DEBUG_LINK,
2472 ("%s: LINK: checklink -> up %s\n", sc->sc_dev.dv_xname,
2473 (status & STATUS_FD) ? "FDX" : "HDX"));
2474 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2475 if (status & STATUS_FD)
2476 sc->sc_tctl |=
2477 TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2478 else
2479 sc->sc_tctl |=
2480 TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2481 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2482 sc->sc_tbi_linkup = 1;
2483 }
2484
2485 wm_tbi_set_linkled(sc);
2486 }
2487
2488 /*
2489 * wm_gmii_reset:
2490 *
2491 * Reset the PHY.
2492 */
2493 void
2494 wm_gmii_reset(struct wm_softc *sc)
2495 {
2496 uint32_t reg;
2497
2498 if (sc->sc_type >= WM_T_CORDOVA) {
2499 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
2500 delay(20000);
2501
2502 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2503 delay(20000);
2504 } else {
2505 /* The PHY reset pin is active-low. */
2506 reg = CSR_READ(sc, WMREG_CTRL_EXT);
2507 reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
2508 CTRL_EXT_SWDPIN(4));
2509 reg |= CTRL_EXT_SWDPIO(4);
2510
2511 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2512 delay(10);
2513
2514 CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2515 delay(10);
2516
2517 CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
2518 delay(10);
2519 #if 0
2520 sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
2521 #endif
2522 }
2523 }
2524
2525 /*
2526 * wm_gmii_mediainit:
2527 *
2528 * Initialize media for use on 1000BASE-T devices.
2529 */
2530 void
2531 wm_gmii_mediainit(struct wm_softc *sc)
2532 {
2533 struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2534
2535 /* We have MII. */
2536 sc->sc_flags |= WM_F_HAS_MII;
2537
2538 sc->sc_tipg = TIPG_1000T_DFLT;
2539
2540 /*
2541 * Let the chip set speed/duplex on its own based on
2542 * signals from the PHY.
2543 */
2544 sc->sc_ctrl |= CTRL_SLU | CTRL_ASDE;
2545 CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2546
2547 /* Initialize our media structures and probe the GMII. */
2548 sc->sc_mii.mii_ifp = ifp;
2549
2550 if (sc->sc_type >= WM_T_CORDOVA) {
2551 sc->sc_mii.mii_readreg = wm_gmii_cordova_readreg;
2552 sc->sc_mii.mii_writereg = wm_gmii_cordova_writereg;
2553 } else {
2554 sc->sc_mii.mii_readreg = wm_gmii_livengood_readreg;
2555 sc->sc_mii.mii_writereg = wm_gmii_livengood_writereg;
2556 }
2557 sc->sc_mii.mii_statchg = wm_gmii_statchg;
2558
2559 wm_gmii_reset(sc);
2560
2561 ifmedia_init(&sc->sc_mii.mii_media, 0, wm_gmii_mediachange,
2562 wm_gmii_mediastatus);
2563
2564 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
2565 MII_OFFSET_ANY, 0);
2566 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
2567 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
2568 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
2569 } else
2570 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
2571 }
2572
2573 /*
2574 * wm_gmii_mediastatus: [ifmedia interface function]
2575 *
2576 * Get the current interface media status on a 1000BASE-T device.
2577 */
2578 void
2579 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
2580 {
2581 struct wm_softc *sc = ifp->if_softc;
2582
2583 mii_pollstat(&sc->sc_mii);
2584 ifmr->ifm_status = sc->sc_mii.mii_media_status;
2585 ifmr->ifm_active = sc->sc_mii.mii_media_active;
2586 }
2587
2588 /*
2589 * wm_gmii_mediachange: [ifmedia interface function]
2590 *
2591 * Set hardware to newly-selected media on a 1000BASE-T device.
2592 */
2593 int
2594 wm_gmii_mediachange(struct ifnet *ifp)
2595 {
2596 struct wm_softc *sc = ifp->if_softc;
2597
2598 if (ifp->if_flags & IFF_UP)
2599 mii_mediachg(&sc->sc_mii);
2600 return (0);
2601 }
2602
2603 #define MDI_IO CTRL_SWDPIN(2)
2604 #define MDI_DIR CTRL_SWDPIO(2) /* host -> PHY */
2605 #define MDI_CLK CTRL_SWDPIN(3)
2606
2607 static void
2608 livengood_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
2609 {
2610 uint32_t i, v;
2611
2612 v = CSR_READ(sc, WMREG_CTRL);
2613 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2614 v |= MDI_DIR | CTRL_SWDPIO(3);
2615
2616 for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
2617 if (data & i)
2618 v |= MDI_IO;
2619 else
2620 v &= ~MDI_IO;
2621 CSR_WRITE(sc, WMREG_CTRL, v);
2622 delay(10);
2623 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2624 delay(10);
2625 CSR_WRITE(sc, WMREG_CTRL, v);
2626 delay(10);
2627 }
2628 }
2629
2630 static uint32_t
2631 livengood_mii_recvbits(struct wm_softc *sc)
2632 {
2633 uint32_t v, i, data = 0;
2634
2635 v = CSR_READ(sc, WMREG_CTRL);
2636 v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
2637 v |= CTRL_SWDPIO(3);
2638
2639 CSR_WRITE(sc, WMREG_CTRL, v);
2640 delay(10);
2641 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2642 delay(10);
2643 CSR_WRITE(sc, WMREG_CTRL, v);
2644 delay(10);
2645
2646 for (i = 0; i < 16; i++) {
2647 data <<= 1;
2648 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2649 delay(10);
2650 if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
2651 data |= 1;
2652 CSR_WRITE(sc, WMREG_CTRL, v);
2653 delay(10);
2654 }
2655
2656 CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
2657 delay(10);
2658 CSR_WRITE(sc, WMREG_CTRL, v);
2659 delay(10);
2660
2661 return (data);
2662 }
2663
2664 #undef MDI_IO
2665 #undef MDI_DIR
2666 #undef MDI_CLK
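
/*
 * The Livengood bit-bangs IEEE 802.3 clause-22 MII management frames
 * through the software-definable pins above: a 32-bit preamble of
 * ones, a start pattern (01), an opcode (10 = read, 01 = write), the
 * 5-bit PHY and register addresses, a turnaround, and 16 data bits,
 * shifted out most-significant bit first.
 */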
2667
2668 /*
2669 * wm_gmii_livengood_readreg: [mii interface function]
2670 *
2671 * Read a PHY register on the GMII (Livengood version).
2672 */
2673 int
2674 wm_gmii_livengood_readreg(struct device *self, int phy, int reg)
2675 {
2676 struct wm_softc *sc = (void *) self;
2677 int rv;
2678
2679 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2680 livengood_mii_sendbits(sc, reg | (phy << 5) |
2681 (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
2682 rv = livengood_mii_recvbits(sc) & 0xffff;
2683
2684 DPRINTF(WM_DEBUG_GMII,
2685 ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
2686 sc->sc_dev.dv_xname, phy, reg, rv));
2687
2688 return (rv);
2689 }
2690
2691 /*
2692 * wm_gmii_livengood_writereg: [mii interface function]
2693 *
2694 * Write a PHY register on the GMII (Livengood version).
2695 */
2696 void
2697 wm_gmii_livengood_writereg(struct device *self, int phy, int reg, int val)
2698 {
2699 struct wm_softc *sc = (void *) self;
2700
2701 livengood_mii_sendbits(sc, 0xffffffffU, 32);
2702 livengood_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
2703 (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
2704 (MII_COMMAND_START << 30), 32);
2705 }
2706
2707 /*
2708 * wm_gmii_cordova_readreg: [mii interface function]
2709 *
2710 * Read a PHY register on the GMII.
2711 */
2712 int
2713 wm_gmii_cordova_readreg(struct device *self, int phy, int reg)
2714 {
2715 struct wm_softc *sc = (void *) self;
2716 uint32_t mdic;
2717 int i, rv;
2718
2719 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
2720 MDIC_REGADD(reg));
2721
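	/*
	 * The chip runs the MDI read cycle itself; poll MDIC_READY for
	 * up to 1ms (100 * 10us) for it to complete.
	 */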
2722 for (i = 0; i < 100; i++) {
2723 mdic = CSR_READ(sc, WMREG_MDIC);
2724 if (mdic & MDIC_READY)
2725 break;
2726 delay(10);
2727 }
2728
2729 if ((mdic & MDIC_READY) == 0) {
2730 printf("%s: MDIC read timed out: phy %d reg %d\n",
2731 sc->sc_dev.dv_xname, phy, reg);
2732 rv = 0;
2733 } else if (mdic & MDIC_E) {
2734 #if 0 /* This is normal if no PHY is present. */
2735 printf("%s: MDIC read error: phy %d reg %d\n",
2736 sc->sc_dev.dv_xname, phy, reg);
2737 #endif
2738 rv = 0;
2739 } else {
2740 rv = MDIC_DATA(mdic);
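		/* An all-ones read usually means no PHY drove MDIO. */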
2741 if (rv == 0xffff)
2742 rv = 0;
2743 }
2744
2745 return (rv);
2746 }
2747
2748 /*
2749 * wm_gmii_cordova_writereg: [mii interface function]
2750 *
2751 * Write a PHY register on the GMII.
2752 */
2753 void
2754 wm_gmii_cordova_writereg(struct device *self, int phy, int reg, int val)
2755 {
2756 struct wm_softc *sc = (void *) self;
2757 uint32_t mdic;
2758 int i;
2759
2760 CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
2761 MDIC_REGADD(reg) | MDIC_DATA(val));
2762
2763 for (i = 0; i < 100; i++) {
2764 mdic = CSR_READ(sc, WMREG_MDIC);
2765 if (mdic & MDIC_READY)
2766 break;
2767 delay(10);
2768 }
2769
2770 if ((mdic & MDIC_READY) == 0)
2771 printf("%s: MDIC write timed out: phy %d reg %d\n",
2772 sc->sc_dev.dv_xname, phy, reg);
2773 else if (mdic & MDIC_E)
2774 printf("%s: MDIC write error: phy %d reg %d\n",
2775 sc->sc_dev.dv_xname, phy, reg);
2776 }
2777
2778 /*
2779 * wm_gmii_statchg: [mii interface function]
2780 *
2781 * Callback from MII layer when media changes.
2782 */
2783 void
2784 wm_gmii_statchg(struct device *self)
2785 {
2786 struct wm_softc *sc = (void *) self;
2787
2788 sc->sc_tctl &= ~TCTL_COLD(0x3ff);
2789
2790 if (sc->sc_mii.mii_media_active & IFM_FDX) {
2791 DPRINTF(WM_DEBUG_LINK,
2792 ("%s: LINK: statchg: FDX\n", sc->sc_dev.dv_xname));
2793 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
2794 } else {
2795 DPRINTF(WM_DEBUG_LINK,
2796 ("%s: LINK: statchg: HDX\n", sc->sc_dev.dv_xname));
2797 sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
2798 }
2799
2800 CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
2801 }
2802